      1 /*
      2  * Copyright (C) 2014 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 #include <deque>
     17 
     18 #include "bump_pointer_space-inl.h"
     19 #include "bump_pointer_space.h"
     20 #include "base/dumpable.h"
     21 #include "base/logging.h"
     22 #include "gc/accounting/read_barrier_table.h"
     23 #include "mirror/class-inl.h"
     24 #include "mirror/object-inl.h"
     25 #include "thread_list.h"
     26 
     27 namespace art {
     28 namespace gc {
     29 namespace space {
     30 
      31 // If a region has live objects whose total size is less than this percentage
      32 // of the region size, evacuate the region.
     33 static constexpr uint kEvacuateLivePercentThreshold = 75U;
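         // For example, a non-large region whose live bytes amount to half of its
         // (rounded-up) allocated bytes satisfies
         // live_bytes * 100 < kEvacuateLivePercentThreshold * bytes_allocated
         // and is therefore evacuated, whereas a region that is 80% live is not.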
     34 
     35 // Whether we protect the unused and cleared regions.
     36 static constexpr bool kProtectClearedRegions = true;
     37 
      38 // Whether we poison memory areas occupied by dead objects in unevacuated regions.
     39 static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
     40 
     41 // Special 32-bit value used to poison memory areas occupied by dead
     42 // objects in unevacuated regions. Dereferencing this value is expected
     43 // to trigger a memory protection fault, as it is unlikely that it
     44 // points to a valid, non-protected memory area.
     45 static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D;  // "BADDROID"
     46 
     47 // Whether we check a region's live bytes count against the region bitmap.
     48 static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
     49 
     50 MemMap RegionSpace::CreateMemMap(const std::string& name,
     51                                  size_t capacity,
     52                                  uint8_t* requested_begin) {
     53   CHECK_ALIGNED(capacity, kRegionSize);
     54   std::string error_msg;
      55   // Ask for an extra kRegionSize of capacity so that we can align the map to kRegionSize
      56   // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
     57   MemMap mem_map;
     58   while (true) {
     59     mem_map = MemMap::MapAnonymous(name.c_str(),
     60                                    requested_begin,
     61                                    capacity + kRegionSize,
     62                                    PROT_READ | PROT_WRITE,
     63                                    /*low_4gb=*/ true,
     64                                    /*reuse=*/ false,
     65                                    /*reservation=*/ nullptr,
     66                                    &error_msg);
     67     if (mem_map.IsValid() || requested_begin == nullptr) {
     68       break;
     69     }
      70     // Retry without a specified begin address.
     71     requested_begin = nullptr;
     72   }
     73   if (!mem_map.IsValid()) {
     74     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
     75         << PrettySize(capacity) << " with message " << error_msg;
     76     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
     77     MemMap::DumpMaps(LOG_STREAM(ERROR));
     78     return MemMap::Invalid();
     79   }
     80   CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
     81   CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
     82   CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
     83   if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
      84     // Got an aligned map. Since we requested a map that is kRegionSize larger, shrink it
      85     // by kRegionSize at the end.
     86     mem_map.SetSize(capacity);
     87   } else {
      88     // Got an unaligned map. Align both ends by kRegionSize.
     89     mem_map.AlignBy(kRegionSize);
     90   }
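           // In either case, the extra kRegionSize requested above guarantees that a
           // kRegionSize-aligned address with at least `capacity` bytes before the end
           // of the map exists, so the checks below (aligned begin/end, size == capacity)
           // hold.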
     91   CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
     92   CHECK_ALIGNED(mem_map.End(), kRegionSize);
     93   CHECK_EQ(mem_map.Size(), capacity);
     94   return mem_map;
     95 }
     96 
     97 RegionSpace* RegionSpace::Create(
     98     const std::string& name, MemMap&& mem_map, bool use_generational_cc) {
     99   return new RegionSpace(name, std::move(mem_map), use_generational_cc);
    100 }
    101 
    102 RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc)
    103     : ContinuousMemMapAllocSpace(name,
    104                                  std::move(mem_map),
    105                                  mem_map.Begin(),
    106                                  mem_map.End(),
    107                                  mem_map.End(),
    108                                  kGcRetentionPolicyAlwaysCollect),
    109       region_lock_("Region lock", kRegionSpaceRegionLock),
    110       use_generational_cc_(use_generational_cc),
    111       time_(1U),
    112       num_regions_(mem_map_.Size() / kRegionSize),
    113       num_non_free_regions_(0U),
    114       num_evac_regions_(0U),
    115       max_peak_num_non_free_regions_(0U),
    116       non_free_region_index_limit_(0U),
    117       current_region_(&full_region_),
    118       evac_region_(nullptr),
    119       cyclic_alloc_region_index_(0U) {
    120   CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
    121   CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
    122   DCHECK_GT(num_regions_, 0U);
    123   regions_.reset(new Region[num_regions_]);
    124   uint8_t* region_addr = mem_map_.Begin();
    125   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    126     regions_[i].Init(i, region_addr, region_addr + kRegionSize);
    127   }
    128   mark_bitmap_.reset(
    129       accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
    130   if (kIsDebugBuild) {
    131     CHECK_EQ(regions_[0].Begin(), Begin());
    132     for (size_t i = 0; i < num_regions_; ++i) {
    133       CHECK(regions_[i].IsFree());
    134       CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
    135       if (i + 1 < num_regions_) {
    136         CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
    137       }
    138     }
    139     CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
    140   }
    141   DCHECK(!full_region_.IsFree());
    142   DCHECK(full_region_.IsAllocated());
    143   size_t ignored;
    144   DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
    145   // Protect the whole region space from the start.
    146   Protect();
    147 }
    148 
    149 size_t RegionSpace::FromSpaceSize() {
    150   uint64_t num_regions = 0;
    151   MutexLock mu(Thread::Current(), region_lock_);
    152   for (size_t i = 0; i < num_regions_; ++i) {
    153     Region* r = &regions_[i];
    154     if (r->IsInFromSpace()) {
    155       ++num_regions;
    156     }
    157   }
    158   return num_regions * kRegionSize;
    159 }
    160 
    161 size_t RegionSpace::UnevacFromSpaceSize() {
    162   uint64_t num_regions = 0;
    163   MutexLock mu(Thread::Current(), region_lock_);
    164   for (size_t i = 0; i < num_regions_; ++i) {
    165     Region* r = &regions_[i];
    166     if (r->IsInUnevacFromSpace()) {
    167       ++num_regions;
    168     }
    169   }
    170   return num_regions * kRegionSize;
    171 }
    172 
    173 size_t RegionSpace::ToSpaceSize() {
    174   uint64_t num_regions = 0;
    175   MutexLock mu(Thread::Current(), region_lock_);
    176   for (size_t i = 0; i < num_regions_; ++i) {
    177     Region* r = &regions_[i];
    178     if (r->IsInToSpace()) {
    179       ++num_regions;
    180     }
    181   }
    182   return num_regions * kRegionSize;
    183 }
    184 
    185 void RegionSpace::Region::SetAsUnevacFromSpace(bool clear_live_bytes) {
    186   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
    187   DCHECK(GetUseGenerationalCC() || clear_live_bytes);
    188   DCHECK(!IsFree() && IsInToSpace());
    189   type_ = RegionType::kRegionTypeUnevacFromSpace;
    190   if (IsNewlyAllocated()) {
    191     // A newly allocated region set as unevac from-space must be
    192     // a large or large tail region.
    193     DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
    194     // Always clear the live bytes of a newly allocated (large or
    195     // large tail) region.
    196     clear_live_bytes = true;
    197     // Clear the "newly allocated" status here, as we do not want the
    198     // GC to see it when encountering (and processing) references in the
    199     // from-space.
    200     //
    201     // Invariant: There should be no newly-allocated region in the
    202     // from-space (when the from-space exists, which is between the calls
    203     // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
    204     is_newly_allocated_ = false;
    205   }
    206   if (clear_live_bytes) {
    207     // Reset the live bytes, as we have made a non-evacuation
    208     // decision (possibly based on the percentage of live bytes).
    209     live_bytes_ = 0;
    210   }
    211 }
    212 
    213 bool RegionSpace::Region::GetUseGenerationalCC() {
    214   // We are retrieving the info from Heap, instead of the cached version in
    215   // RegionSpace, because accessing the Heap from a Region object is easier
    216   // than accessing the RegionSpace.
    217   return art::Runtime::Current()->GetHeap()->GetUseGenerationalCC();
    218 }
    219 
    220 inline bool RegionSpace::Region::ShouldBeEvacuated(EvacMode evac_mode) {
    221   // Evacuation mode `kEvacModeNewlyAllocated` is only used during sticky-bit CC collections.
    222   DCHECK(GetUseGenerationalCC() || (evac_mode != kEvacModeNewlyAllocated));
    223   DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
    224   // The region should be evacuated if:
    225   // - the evacuation is forced (`evac_mode == kEvacModeForceAll`); or
    226   // - the region was allocated after the start of the previous GC (newly allocated region); or
    227   // - the live ratio is below threshold (`kEvacuateLivePercentThreshold`).
    228   if (UNLIKELY(evac_mode == kEvacModeForceAll)) {
    229     return true;
    230   }
    231   bool result = false;
    232   if (is_newly_allocated_) {
    233     // Invariant: newly allocated regions have an undefined live bytes count.
    234     DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
    235     if (IsAllocated()) {
    236       // We always evacuate newly-allocated non-large regions as we
    237       // believe they contain many dead objects (a very simple form of
    238       // the generational hypothesis, even before the Sticky-Bit CC
    239       // approach).
    240       //
    241       // TODO: Verify that assertion by collecting statistics on the
    242       // number/proportion of live objects in newly allocated regions
    243       // in RegionSpace::ClearFromSpace.
    244       //
    245       // Note that a side effect of evacuating a newly-allocated
    246       // non-large region is that the "newly allocated" status will
    247       // later be removed, as its live objects will be copied to an
    248       // evacuation region, which won't be marked as "newly
    249       // allocated" (see RegionSpace::AllocateRegion).
    250       result = true;
    251     } else {
    252       DCHECK(IsLarge());
    253       // We never want to evacuate a large region (and the associated
    254       // tail regions), except if:
    255       // - we are forced to do so (see the `kEvacModeForceAll` case
    256       //   above); or
    257       // - we know that the (sole) object contained in this region is
    258       //   dead (see the corresponding logic below, in the
    259       //   `kEvacModeLivePercentNewlyAllocated` case).
    260       // For a newly allocated region (i.e. allocated since the
    261       // previous GC started), we don't have any liveness information
    262       // (the live bytes count is -1 -- also note this region has been
    263       // a to-space one between the time of its allocation and now),
    264       // so we prefer not to evacuate it.
    265       result = false;
    266     }
    267   } else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
    268     bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
    269     if (is_live_percent_valid) {
    270       DCHECK(IsInToSpace());
    271       DCHECK(!IsLargeTail());
    272       DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
    273       DCHECK_LE(live_bytes_, BytesAllocated());
    274       const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
    275       DCHECK_LE(live_bytes_, bytes_allocated);
    276       if (IsAllocated()) {
     277         // Side note: live_percent == 0 does not necessarily mean
     278         // there are no live objects; due to rounding, there may be
     279         // a few.
    280         result = (live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated);
    281       } else {
    282         DCHECK(IsLarge());
    283         result = (live_bytes_ == 0U);
    284       }
    285     } else {
    286       result = false;
    287     }
    288   }
    289   return result;
    290 }
    291 
    292 void RegionSpace::ZeroLiveBytesForLargeObject(mirror::Object* obj) {
    293   // This method is only used when Generational CC collection is enabled.
    294   DCHECK(use_generational_cc_);
    295 
     296   // This code uses logic similar to that used in RegionSpace::FreeLarge
    297   // to traverse the regions supporting `obj`.
    298   // TODO: Refactor.
    299   DCHECK(IsLargeObject(obj));
    300   DCHECK_ALIGNED(obj, kRegionSize);
    301   size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
    302   DCHECK_GT(obj_size, space::RegionSpace::kRegionSize);
    303   // Size of the memory area allocated for `obj`.
    304   size_t obj_alloc_size = RoundUp(obj_size, space::RegionSpace::kRegionSize);
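           // For example, an object whose size is three and a half regions' worth of
           // bytes gets obj_alloc_size == 4 * kRegionSize: one large region plus three
           // large tail regions, all of which have their live bytes zeroed below.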
    305   uint8_t* begin_addr = reinterpret_cast<uint8_t*>(obj);
    306   uint8_t* end_addr = begin_addr + obj_alloc_size;
    307   DCHECK_ALIGNED(end_addr, kRegionSize);
    308 
    309   // Zero the live bytes of the large region and large tail regions containing the object.
    310   MutexLock mu(Thread::Current(), region_lock_);
    311   for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    312     Region* region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    313     if (addr == begin_addr) {
    314       DCHECK(region->IsLarge());
    315     } else {
    316       DCHECK(region->IsLargeTail());
    317     }
    318     region->ZeroLiveBytes();
    319   }
    320   if (kIsDebugBuild && end_addr < Limit()) {
    321     // If we aren't at the end of the space, check that the next region is not a large tail.
    322     Region* following_region = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    323     DCHECK(!following_region->IsLargeTail());
    324   }
    325 }
    326 
    327 // Determine which regions to evacuate and mark them as
    328 // from-space. Mark the rest as unevacuated from-space.
    329 void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table,
    330                                EvacMode evac_mode,
    331                                bool clear_live_bytes) {
    332   // Live bytes are only preserved (i.e. not cleared) during sticky-bit CC collections.
    333   DCHECK(use_generational_cc_ || clear_live_bytes);
    334   ++time_;
    335   if (kUseTableLookupReadBarrier) {
    336     DCHECK(rb_table->IsAllCleared());
    337     rb_table->SetAll();
    338   }
    339   MutexLock mu(Thread::Current(), region_lock_);
    340   // Counter for the number of expected large tail regions following a large region.
    341   size_t num_expected_large_tails = 0U;
    342   // Flag to store whether the previously seen large region has been evacuated.
    343   // This is used to apply the same evacuation policy to related large tail regions.
    344   bool prev_large_evacuated = false;
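           // For example, a large object spanning three regions sets
           // num_expected_large_tails to 2 when its head region is visited, and the two
           // following large tail regions inherit the head region's evacuation decision
           // via prev_large_evacuated.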
    345   VerifyNonFreeRegionLimit();
    346   const size_t iter_limit = kUseTableLookupReadBarrier
    347       ? num_regions_
    348       : std::min(num_regions_, non_free_region_index_limit_);
    349   for (size_t i = 0; i < iter_limit; ++i) {
    350     Region* r = &regions_[i];
    351     RegionState state = r->State();
    352     RegionType type = r->Type();
    353     if (!r->IsFree()) {
    354       DCHECK(r->IsInToSpace());
    355       if (LIKELY(num_expected_large_tails == 0U)) {
    356         DCHECK((state == RegionState::kRegionStateAllocated ||
    357                 state == RegionState::kRegionStateLarge) &&
    358                type == RegionType::kRegionTypeToSpace);
    359         bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
    360         bool is_newly_allocated = r->IsNewlyAllocated();
    361         if (should_evacuate) {
    362           r->SetAsFromSpace();
    363           DCHECK(r->IsInFromSpace());
    364         } else {
    365           r->SetAsUnevacFromSpace(clear_live_bytes);
    366           DCHECK(r->IsInUnevacFromSpace());
    367         }
    368         if (UNLIKELY(state == RegionState::kRegionStateLarge &&
    369                      type == RegionType::kRegionTypeToSpace)) {
    370           prev_large_evacuated = should_evacuate;
     371           // In 2-phase full-heap GC, this function is called after marking is
     372           // done, so it is possible that a newly allocated large object is
     373           // marked while its live_bytes is still -1. We need to clear the
     374           // mark bit; otherwise live_bytes will not be updated in
     375           // ConcurrentCopying::ProcessMarkStackRef(), which would break the
     376           // logic.
    377           if (use_generational_cc_ && !should_evacuate && is_newly_allocated) {
    378             GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
    379           }
    380           num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
    381           DCHECK_GT(num_expected_large_tails, 0U);
    382         }
    383       } else {
    384         DCHECK(state == RegionState::kRegionStateLargeTail &&
    385                type == RegionType::kRegionTypeToSpace);
    386         if (prev_large_evacuated) {
    387           r->SetAsFromSpace();
    388           DCHECK(r->IsInFromSpace());
    389         } else {
    390           r->SetAsUnevacFromSpace(clear_live_bytes);
    391           DCHECK(r->IsInUnevacFromSpace());
    392         }
    393         --num_expected_large_tails;
    394       }
    395     } else {
    396       DCHECK_EQ(num_expected_large_tails, 0U);
    397       if (kUseTableLookupReadBarrier) {
    398         // Clear the rb table for to-space regions.
    399         rb_table->Clear(r->Begin(), r->End());
    400       }
    401     }
    402     // Invariant: There should be no newly-allocated region in the from-space.
    403     DCHECK(!r->is_newly_allocated_);
    404   }
    405   DCHECK_EQ(num_expected_large_tails, 0U);
    406   current_region_ = &full_region_;
    407   evac_region_ = &full_region_;
    408 }
    409 
    410 static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
    411   ZeroAndReleasePages(begin, end - begin);
    412   if (kProtectClearedRegions) {
    413     CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
    414   }
    415 }
    416 
    417 void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
    418                                  /* out */ uint64_t* cleared_objects,
    419                                  const bool clear_bitmap) {
    420   DCHECK(cleared_bytes != nullptr);
    421   DCHECK(cleared_objects != nullptr);
    422   *cleared_bytes = 0;
    423   *cleared_objects = 0;
    424   size_t new_non_free_region_index_limit = 0;
    425   // We should avoid calling madvise syscalls while holding region_lock_.
     426   // Therefore, we split the work of this function into two loops. The first
     427   // loop gathers memory ranges that must be madvised. Then we release the lock
     428   // and perform madvise on the gathered memory ranges. Finally, we reacquire
     429   // the lock and loop over the regions to clear the from-space regions and make
     430   // them available for allocation.
    431   std::deque<std::pair<uint8_t*, uint8_t*>> madvise_list;
    432   // Gather memory ranges that need to be madvised.
    433   {
    434     MutexLock mu(Thread::Current(), region_lock_);
    435     // Lambda expression `expand_madvise_range` adds a region to the "clear block".
    436     //
     437     // As we iterate over from-space regions, we maintain a "clear block" of
     438     // adjacent to-be-cleared regions, bounded by `clear_block_begin` and
     439     // `clear_block_end`. When processing a new region that is not adjacent to
     440     // the clear block (a discontinuity in the cleared regions), the current
     441     // clear block is added to madvise_list and a new clear block is started at
     442     // the most recent to-be-cleared region.
    443     //
    444     // This is done in order to combine zeroing and releasing pages to reduce how
    445     // often madvise is called. This helps reduce contention on the mmap semaphore
    446     // (see b/62194020).
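             // For example, if regions 3, 4, 5 and 7 are the only regions to be cleared,
             // madvise_list ends up with the two ranges
             // [regions_[3].Begin(), regions_[5].End()) and
             // [regions_[7].Begin(), regions_[7].End()).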
    447     uint8_t* clear_block_begin = nullptr;
    448     uint8_t* clear_block_end = nullptr;
    449     auto expand_madvise_range = [&madvise_list, &clear_block_begin, &clear_block_end] (Region* r) {
    450       if (clear_block_end != r->Begin()) {
    451         if (clear_block_begin != nullptr) {
    452           DCHECK(clear_block_end != nullptr);
    453           madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
    454         }
    455         clear_block_begin = r->Begin();
    456       }
    457       clear_block_end = r->End();
    458     };
    459     for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    460       Region* r = &regions_[i];
     461       // The following check walks the objects in the region, so it must be
     462       // performed before the region is madvised; it therefore cannot be
     463       // deferred to the second loop below.
    464       if (kCheckLiveBytesAgainstRegionBitmap) {
    465         CheckLiveBytesAgainstRegionBitmap(r);
    466       }
    467       if (r->IsInFromSpace()) {
    468         expand_madvise_range(r);
    469       } else if (r->IsInUnevacFromSpace()) {
    470         // We must skip tails of live large objects.
    471         if (r->LiveBytes() == 0 && !r->IsLargeTail()) {
     472           // Special case for 0 live bytes: all of the objects in the region are
     473           // dead and we can clear it. This is important for large objects, since we must
     474           // not visit dead ones in RegionSpace::Walk because they may contain dangling
     475           // references to invalid objects. It is also better to clear these regions now
     476           // instead of at the end of the next GC to save RAM. If we don't clear the regions
     477           // here, they will be cleared in the next GC by the normal live percent evacuation logic.
    478           expand_madvise_range(r);
    479           // Also release RAM for large tails.
    480           while (i + 1 < num_regions_ && regions_[i + 1].IsLargeTail()) {
    481             expand_madvise_range(&regions_[i + 1]);
    482             i++;
    483           }
    484         }
    485       }
    486     }
     487     // There is a small probability that we reach here with
     488     // clear_block_{begin, end} still nullptr: if all the regions allocated since
     489     // the last GC have been for large objects and all of them survive until this
     490     // GC cycle, then there are no regions in the from-space.
    491     if (LIKELY(clear_block_begin != nullptr)) {
    492       DCHECK(clear_block_end != nullptr);
    493       madvise_list.push_back(std::pair(clear_block_begin, clear_block_end));
    494     }
    495   }
    496 
    497   // Madvise the memory ranges.
    498   for (const auto &iter : madvise_list) {
    499     ZeroAndProtectRegion(iter.first, iter.second);
    500     if (clear_bitmap) {
    501       GetLiveBitmap()->ClearRange(
    502           reinterpret_cast<mirror::Object*>(iter.first),
    503           reinterpret_cast<mirror::Object*>(iter.second));
    504     }
    505   }
    506   madvise_list.clear();
    507 
    508   // Iterate over regions again and actually make the from space regions
    509   // available for allocation.
    510   MutexLock mu(Thread::Current(), region_lock_);
    511   VerifyNonFreeRegionLimit();
    512 
    513   // Update max of peak non free region count before reclaiming evacuated regions.
    514   max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
    515                                             num_non_free_regions_);
    516 
    517   for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    518     Region* r = &regions_[i];
    519     if (r->IsInFromSpace()) {
    520       DCHECK(!r->IsTlab());
    521       *cleared_bytes += r->BytesAllocated();
    522       *cleared_objects += r->ObjectsAllocated();
    523       --num_non_free_regions_;
    524       r->Clear(/*zero_and_release_pages=*/false);
    525     } else if (r->IsInUnevacFromSpace()) {
    526       if (r->LiveBytes() == 0) {
    527         DCHECK(!r->IsLargeTail());
    528         *cleared_bytes += r->BytesAllocated();
    529         *cleared_objects += r->ObjectsAllocated();
    530         r->Clear(/*zero_and_release_pages=*/false);
    531         size_t free_regions = 1;
    532         // Also release RAM for large tails.
    533         while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
    534           regions_[i + free_regions].Clear(/*zero_and_release_pages=*/false);
    535           ++free_regions;
    536         }
    537         num_non_free_regions_ -= free_regions;
     538         // When clear_bitmap is true, the bitmap for this range has already been
     539         // cleared above, when madvising the gathered memory ranges.
    540         if (!clear_bitmap) {
    541           GetLiveBitmap()->ClearRange(
    542               reinterpret_cast<mirror::Object*>(r->Begin()),
    543               reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
    544         }
    545         continue;
    546       }
    547       r->SetUnevacFromSpaceAsToSpace();
    548       if (r->AllAllocatedBytesAreLive()) {
    549         // Try to optimize the number of ClearRange calls by checking whether the next regions
    550         // can also be cleared.
    551         size_t regions_to_clear_bitmap = 1;
    552         while (i + regions_to_clear_bitmap < num_regions_) {
    553           Region* const cur = &regions_[i + regions_to_clear_bitmap];
    554           if (!cur->AllAllocatedBytesAreLive()) {
    555             DCHECK(!cur->IsLargeTail());
    556             break;
    557           }
    558           CHECK(cur->IsInUnevacFromSpace());
    559           cur->SetUnevacFromSpaceAsToSpace();
    560           ++regions_to_clear_bitmap;
    561         }
    562 
     563         // Optimization (for full CC only): If *all* the allocated bytes in a
     564         // region are live, then the live-bit information for these objects is
     565         // superfluous:
     566         // - We can determine that these objects are all live by using
     567         //   Region::AllAllocatedBytesAreLive (which just checks whether
     568         //   `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
    569         // - We can visit the objects in this region using
    570         //   RegionSpace::GetNextObject, i.e. without resorting to the
    571         //   live bits (see RegionSpace::WalkInternal).
    572         // Therefore, we can clear the bits for these objects in the
    573         // (live) region space bitmap (and release the corresponding pages).
    574         //
    575         // This optimization is incompatible with Generational CC, because:
    576         // - minor (young-generation) collections need to know which objects
     577         //   were marked during the previous GC cycle, meaning all mark bitmaps
    578         //   (this includes the region space bitmap) need to be preserved
    579         //   between a (minor or major) collection N and a following minor
    580         //   collection N+1;
    581         // - at this stage (in the current GC cycle), we cannot determine
    582         //   whether the next collection will be a minor or a major one;
    583         // This means that we need to be conservative and always preserve the
    584         // region space bitmap when using Generational CC.
    585         // Note that major collections do not require the previous mark bitmaps
     586         // to be preserved, and as a matter of fact they do clear the region space
    587         // bitmap. But they cannot do so before we know the next GC cycle will
    588         // be a major one, so this operation happens at the beginning of such a
    589         // major collection, before marking starts.
    590         if (!use_generational_cc_) {
    591           GetLiveBitmap()->ClearRange(
    592               reinterpret_cast<mirror::Object*>(r->Begin()),
    593               reinterpret_cast<mirror::Object*>(r->Begin()
    594                                                 + regions_to_clear_bitmap * kRegionSize));
    595         }
    596         // Skip over extra regions for which we cleared the bitmaps: we shall not clear them,
    597         // as they are unevac regions that are live.
    598         // Subtract one for the for-loop.
    599         i += regions_to_clear_bitmap - 1;
    600       } else {
    601         // TODO: Explain why we do not poison dead objects in region
    602         // `r` when it has an undefined live bytes count (i.e. when
    603         // `r->LiveBytes() == static_cast<size_t>(-1)`) with
    604         // Generational CC.
    605         if (!use_generational_cc_ || (r->LiveBytes() != static_cast<size_t>(-1))) {
    606           // Only some allocated bytes are live in this unevac region.
    607           // This should only happen for an allocated non-large region.
    608           DCHECK(r->IsAllocated()) << r->State();
    609           if (kPoisonDeadObjectsInUnevacuatedRegions) {
    610             PoisonDeadObjectsInUnevacuatedRegion(r);
    611           }
    612         }
    613       }
    614     }
     615     // Note: last_checked_region may differ from r, as i may have been advanced above.
    616     Region* last_checked_region = &regions_[i];
    617     if (!last_checked_region->IsFree()) {
    618       new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
    619                                                  last_checked_region->Idx() + 1);
    620     }
    621   }
    622   // Update non_free_region_index_limit_.
    623   SetNonFreeRegionLimit(new_non_free_region_index_limit);
    624   evac_region_ = nullptr;
    625   num_non_free_regions_ += num_evac_regions_;
    626   num_evac_regions_ = 0;
    627 }
    628 
    629 void RegionSpace::CheckLiveBytesAgainstRegionBitmap(Region* r) {
    630   if (r->LiveBytes() == static_cast<size_t>(-1)) {
    631     // Live bytes count is undefined for `r`; nothing to check here.
    632     return;
    633   }
    634 
    635   // Functor walking the region space bitmap for the range corresponding
    636   // to region `r` and calculating the sum of live bytes.
    637   size_t live_bytes_recount = 0u;
    638   auto recount_live_bytes =
    639       [&r, &live_bytes_recount](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    640     DCHECK_ALIGNED(obj, kAlignment);
    641     if (r->IsLarge()) {
    642       // If `r` is a large region, then it contains at most one
    643       // object, which must start at the beginning of the
     644       // region. The live byte count in that case covers the whole
     645       // allocation (the large region plus its large tail regions).
    646       DCHECK_EQ(reinterpret_cast<uint8_t*>(obj), r->Begin());
    647       DCHECK_EQ(live_bytes_recount, 0u);
    648       live_bytes_recount = r->Top() - r->Begin();
    649     } else {
    650       DCHECK(r->IsAllocated())
    651           << "r->State()=" << r->State() << " r->LiveBytes()=" << r->LiveBytes();
    652       size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
    653       size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    654       live_bytes_recount += alloc_size;
    655     }
    656   };
    657   // Visit live objects in `r` and recount the live bytes.
    658   GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
    659                                     reinterpret_cast<uintptr_t>(r->Top()),
    660                                     recount_live_bytes);
    661   // Check that this recount matches the region's current live bytes count.
    662   DCHECK_EQ(live_bytes_recount, r->LiveBytes());
    663 }
    664 
    665 // Poison the memory area in range [`begin`, `end`) with value `kPoisonDeadObject`.
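         // For example, a 16-byte gap between two live objects is overwritten with four
         // consecutive copies of the 4-byte value kPoisonDeadObject.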
    666 static void PoisonUnevacuatedRange(uint8_t* begin, uint8_t* end) {
    667   static constexpr size_t kPoisonDeadObjectSize = sizeof(kPoisonDeadObject);
    668   static_assert(IsPowerOfTwo(kPoisonDeadObjectSize) &&
    669                 IsPowerOfTwo(RegionSpace::kAlignment) &&
    670                 (kPoisonDeadObjectSize < RegionSpace::kAlignment),
    671                 "RegionSpace::kAlignment should be a multiple of kPoisonDeadObjectSize"
    672                 " and both should be powers of 2");
    673   DCHECK_ALIGNED(begin, kPoisonDeadObjectSize);
    674   DCHECK_ALIGNED(end, kPoisonDeadObjectSize);
    675   uint32_t* begin_addr = reinterpret_cast<uint32_t*>(begin);
    676   uint32_t* end_addr = reinterpret_cast<uint32_t*>(end);
    677   std::fill(begin_addr, end_addr, kPoisonDeadObject);
    678 }
    679 
    680 void RegionSpace::PoisonDeadObjectsInUnevacuatedRegion(Region* r) {
    681   // The live byte count of `r` should be different from -1, as this
    682   // region should neither be a newly allocated region nor an
    683   // evacuated region.
    684   DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1))
    685       << "Unexpected live bytes count of -1 in " << Dumpable<Region>(*r);
    686 
    687   // Past-the-end address of the previously visited (live) object (or
    688   // the beginning of the region, if `maybe_poison` has not run yet).
    689   uint8_t* prev_obj_end = reinterpret_cast<uint8_t*>(r->Begin());
    690 
    691   // Functor poisoning the space between `obj` and the previously
     692   // visited (live) object (or the beginning of the region), if any.
    693   auto maybe_poison = [&prev_obj_end](mirror::Object* obj) REQUIRES(Locks::mutator_lock_) {
    694     DCHECK_ALIGNED(obj, kAlignment);
    695     uint8_t* cur_obj_begin = reinterpret_cast<uint8_t*>(obj);
    696     if (cur_obj_begin != prev_obj_end) {
    697       // There is a gap (dead object(s)) between the previously
    698       // visited (live) object (or the beginning of the region) and
    699       // `obj`; poison that space.
    700       PoisonUnevacuatedRange(prev_obj_end, cur_obj_begin);
    701     }
    702     prev_obj_end = reinterpret_cast<uint8_t*>(GetNextObject(obj));
    703   };
    704 
    705   // Visit live objects in `r` and poison gaps (dead objects) between them.
    706   GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
    707                                     reinterpret_cast<uintptr_t>(r->Top()),
    708                                     maybe_poison);
    709   // Poison memory between the last live object and the end of the region, if any.
    710   if (prev_obj_end < r->Top()) {
    711     PoisonUnevacuatedRange(prev_obj_end, r->Top());
    712   }
    713 }
    714 
    715 void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
    716                                                size_t /* failed_alloc_bytes */) {
    717   size_t max_contiguous_allocation = 0;
    718   MutexLock mu(Thread::Current(), region_lock_);
    719   if (current_region_->End() - current_region_->Top() > 0) {
    720     max_contiguous_allocation = current_region_->End() - current_region_->Top();
    721   }
    722   if (num_non_free_regions_ * 2 < num_regions_) {
     723     // We reserve half of the regions for evacuation only. If we
    724     // occupy more than half the regions, do not report the free
    725     // regions as available.
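             // For example, free runs of 3, 1 and 5 regions, each followed by a non-free
             // region, yield max_contiguous_free_regions == 5 and contribute
             // 5 * kRegionSize bytes to the reported figure.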
    726     size_t max_contiguous_free_regions = 0;
    727     size_t num_contiguous_free_regions = 0;
    728     bool prev_free_region = false;
    729     for (size_t i = 0; i < num_regions_; ++i) {
    730       Region* r = &regions_[i];
    731       if (r->IsFree()) {
    732         if (!prev_free_region) {
    733           CHECK_EQ(num_contiguous_free_regions, 0U);
    734           prev_free_region = true;
    735         }
    736         ++num_contiguous_free_regions;
    737       } else {
    738         if (prev_free_region) {
    739           CHECK_NE(num_contiguous_free_regions, 0U);
    740           max_contiguous_free_regions = std::max(max_contiguous_free_regions,
    741                                                  num_contiguous_free_regions);
    742           num_contiguous_free_regions = 0U;
    743           prev_free_region = false;
    744         }
    745       }
    746     }
    747     max_contiguous_allocation = std::max(max_contiguous_allocation,
    748                                          max_contiguous_free_regions * kRegionSize);
    749   }
    750   os << "; failed due to fragmentation (largest possible contiguous allocation "
    751      <<  max_contiguous_allocation << " bytes)";
    752   // Caller's job to print failed_alloc_bytes.
    753 }
    754 
    755 void RegionSpace::Clear() {
    756   MutexLock mu(Thread::Current(), region_lock_);
    757   for (size_t i = 0; i < num_regions_; ++i) {
    758     Region* r = &regions_[i];
    759     if (!r->IsFree()) {
    760       --num_non_free_regions_;
    761     }
    762     r->Clear(/*zero_and_release_pages=*/true);
    763   }
    764   SetNonFreeRegionLimit(0);
    765   DCHECK_EQ(num_non_free_regions_, 0u);
    766   current_region_ = &full_region_;
    767   evac_region_ = &full_region_;
    768 }
    769 
    770 void RegionSpace::Protect() {
    771   if (kProtectClearedRegions) {
    772     CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_NONE);
    773   }
    774 }
    775 
    776 void RegionSpace::Unprotect() {
    777   if (kProtectClearedRegions) {
    778     CheckedCall(mprotect, __FUNCTION__, Begin(), Size(), PROT_READ | PROT_WRITE);
    779   }
    780 }
    781 
    782 void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
    783   MutexLock mu(Thread::Current(), region_lock_);
    784   CHECK_LE(new_capacity, NonGrowthLimitCapacity());
    785   size_t new_num_regions = new_capacity / kRegionSize;
    786   if (non_free_region_index_limit_ > new_num_regions) {
    787     LOG(WARNING) << "Couldn't clamp region space as there are regions in use beyond growth limit.";
    788     return;
    789   }
    790   num_regions_ = new_num_regions;
    791   if (kCyclicRegionAllocation && cyclic_alloc_region_index_ >= num_regions_) {
    792     cyclic_alloc_region_index_ = 0u;
    793   }
    794   SetLimit(Begin() + new_capacity);
    795   if (Size() > new_capacity) {
    796     SetEnd(Limit());
    797   }
    798   GetMarkBitmap()->SetHeapSize(new_capacity);
    799   GetMemMap()->SetSize(new_capacity);
    800 }
    801 
    802 void RegionSpace::Dump(std::ostream& os) const {
    803   os << GetName() << " "
    804      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
    805 }
    806 
    807 void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
    808   CHECK(HasAddress(obj));
    809   MutexLock mu(Thread::Current(), region_lock_);
    810   RefToRegionUnlocked(obj)->Dump(os);
    811 }
    812 
    813 void RegionSpace::DumpRegions(std::ostream& os) {
    814   MutexLock mu(Thread::Current(), region_lock_);
    815   for (size_t i = 0; i < num_regions_; ++i) {
    816     regions_[i].Dump(os);
    817   }
    818 }
    819 
    820 void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
    821   MutexLock mu(Thread::Current(), region_lock_);
    822   for (size_t i = 0; i < num_regions_; ++i) {
    823     Region* reg = &regions_[i];
    824     if (!reg->IsFree()) {
    825       reg->Dump(os);
    826     }
    827   }
    828 }
    829 
    830 void RegionSpace::RecordAlloc(mirror::Object* ref) {
    831   CHECK(ref != nullptr);
    832   Region* r = RefToRegion(ref);
    833   r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
    834 }
    835 
    836 bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
    837   MutexLock mu(self, region_lock_);
    838   RevokeThreadLocalBuffersLocked(self);
    839   // Retain sufficient free regions for full evacuation.
    840 
    841   Region* r = AllocateRegion(/*for_evac=*/ false);
    842   if (r != nullptr) {
    843     r->is_a_tlab_ = true;
    844     r->thread_ = self;
    845     r->SetTop(r->End());
    846     self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
    847     return true;
    848   }
    849   return false;
    850 }
    851 
    852 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
    853   MutexLock mu(Thread::Current(), region_lock_);
    854   RevokeThreadLocalBuffersLocked(thread);
    855   return 0U;
    856 }
    857 
    858 void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
    859   uint8_t* tlab_start = thread->GetTlabStart();
    860   DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
    861   if (tlab_start != nullptr) {
    862     DCHECK_ALIGNED(tlab_start, kRegionSize);
    863     Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    864     DCHECK(r->IsAllocated());
    865     DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    866     r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
    867                                     thread->GetThreadLocalBytesAllocated());
    868     r->is_a_tlab_ = false;
    869     r->thread_ = nullptr;
    870   }
    871   thread->SetTlab(nullptr, nullptr, nullptr);
    872 }
    873 
    874 size_t RegionSpace::RevokeAllThreadLocalBuffers() {
    875   Thread* self = Thread::Current();
    876   MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    877   MutexLock mu2(self, *Locks::thread_list_lock_);
    878   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    879   for (Thread* thread : thread_list) {
    880     RevokeThreadLocalBuffers(thread);
    881   }
    882   return 0U;
    883 }
    884 
    885 void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
    886   if (kIsDebugBuild) {
    887     DCHECK(!thread->HasTlab());
    888   }
    889 }
    890 
    891 void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
    892   if (kIsDebugBuild) {
    893     Thread* self = Thread::Current();
    894     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    895     MutexLock mu2(self, *Locks::thread_list_lock_);
    896     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    897     for (Thread* thread : thread_list) {
    898       AssertThreadLocalBuffersAreRevoked(thread);
    899     }
    900   }
    901 }
    902 
    903 void RegionSpace::Region::Dump(std::ostream& os) const {
    904   os << "Region[" << idx_ << "]="
    905      << reinterpret_cast<void*>(begin_)
    906      << "-" << reinterpret_cast<void*>(Top())
    907      << "-" << reinterpret_cast<void*>(end_)
    908      << " state=" << state_
    909      << " type=" << type_
    910      << " objects_allocated=" << objects_allocated_
    911      << " alloc_time=" << alloc_time_
    912      << " live_bytes=" << live_bytes_;
    913 
    914   if (live_bytes_ != static_cast<size_t>(-1)) {
    915     os << " ratio over allocated bytes="
    916        << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
    917     uint64_t longest_consecutive_free_bytes = GetLongestConsecutiveFreeBytes();
    918     os << " longest_consecutive_free_bytes=" << longest_consecutive_free_bytes
    919        << " (" << PrettySize(longest_consecutive_free_bytes) << ")";
    920   }
    921 
    922   os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
    923      << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
    924      << " thread=" << thread_ << '\n';
    925 }
    926 
    927 uint64_t RegionSpace::Region::GetLongestConsecutiveFreeBytes() const {
    928   if (IsFree()) {
    929     return kRegionSize;
    930   }
    931   if (IsLarge() || IsLargeTail()) {
    932     return 0u;
    933   }
    934   uintptr_t max_gap = 0u;
    935   uintptr_t prev_object_end = reinterpret_cast<uintptr_t>(Begin());
    936   // Iterate through all live objects and find the largest free gap.
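           // For example, live objects at [Begin(), Begin() + 32) and [Begin() + 96, ...)
           // leave a 64-byte gap between them, so max_gap becomes at least 64.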
    937   auto visitor = [&max_gap, &prev_object_end](mirror::Object* obj)
    938     REQUIRES_SHARED(Locks::mutator_lock_) {
    939     uintptr_t current = reinterpret_cast<uintptr_t>(obj);
    940     uintptr_t diff = current - prev_object_end;
    941     max_gap = std::max(diff, max_gap);
    942     uintptr_t object_end = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
    943     prev_object_end = RoundUp(object_end, kAlignment);
    944   };
    945   space::RegionSpace* region_space = art::Runtime::Current()->GetHeap()->GetRegionSpace();
    946   region_space->WalkNonLargeRegion(visitor, this);
    947   return static_cast<uint64_t>(max_gap);
    948 }
    949 
    950 
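         // AllocationSizeNonvirtual returns the object's size; the usable size (when
         // requested) is that size rounded up to kAlignment for objects in regular
         // regions and to kRegionSize for large objects. For example, an object slightly
         // larger than kRegionSize reports a usable size of 2 * kRegionSize.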
    951 size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
    952   size_t num_bytes = obj->SizeOf();
    953   if (usable_size != nullptr) {
    954     if (LIKELY(num_bytes <= kRegionSize)) {
    955       DCHECK(RefToRegion(obj)->IsAllocated());
    956       *usable_size = RoundUp(num_bytes, kAlignment);
    957     } else {
    958       DCHECK(RefToRegion(obj)->IsLarge());
    959       *usable_size = RoundUp(num_bytes, kRegionSize);
    960     }
    961   }
    962   return num_bytes;
    963 }
    964 
    965 void RegionSpace::Region::Clear(bool zero_and_release_pages) {
    966   top_.store(begin_, std::memory_order_relaxed);
    967   state_ = RegionState::kRegionStateFree;
    968   type_ = RegionType::kRegionTypeNone;
    969   objects_allocated_.store(0, std::memory_order_relaxed);
    970   alloc_time_ = 0;
    971   live_bytes_ = static_cast<size_t>(-1);
    972   if (zero_and_release_pages) {
    973     ZeroAndProtectRegion(begin_, end_);
    974   }
    975   is_newly_allocated_ = false;
    976   is_a_tlab_ = false;
    977   thread_ = nullptr;
    978 }
    979 
    980 RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
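           // Outside of evacuation, refuse to allocate if the allocation would leave
           // more than half of the regions non-free, so that enough free regions remain
           // as evacuation targets. For example, with 16 regions a non-evacuation
           // request fails as soon as 8 regions are already non-free, since
           // (8 + 1) * 2 > 16.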
    981   if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    982     return nullptr;
    983   }
    984   for (size_t i = 0; i < num_regions_; ++i) {
    985     // When using the cyclic region allocation strategy, try to
    986     // allocate a region starting from the last cyclic allocated
    987     // region marker. Otherwise, try to allocate a region starting
    988     // from the beginning of the region space.
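             // For example, with num_regions_ == 8 and cyclic_alloc_region_index_ == 6,
             // the probe order is regions 6, 7, 0, 1, 2, 3, 4, 5.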
    989     size_t region_index = kCyclicRegionAllocation
    990         ? ((cyclic_alloc_region_index_ + i) % num_regions_)
    991         : i;
    992     Region* r = &regions_[region_index];
    993     if (r->IsFree()) {
    994       r->Unfree(this, time_);
    995       if (use_generational_cc_) {
    996         // TODO: Add an explanation for this assertion.
    997         DCHECK(!for_evac || !r->is_newly_allocated_);
    998       }
    999       if (for_evac) {
   1000         ++num_evac_regions_;
   1001         // Evac doesn't count as newly allocated.
   1002       } else {
   1003         r->SetNewlyAllocated();
   1004         ++num_non_free_regions_;
   1005       }
   1006       if (kCyclicRegionAllocation) {
   1007         // Move the cyclic allocation region marker to the region
   1008         // following the one that was just allocated.
   1009         cyclic_alloc_region_index_ = (region_index + 1) % num_regions_;
   1010       }
   1011       return r;
   1012     }
   1013   }
   1014   return nullptr;
   1015 }
   1016 
   1017 void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
   1018   DCHECK(IsFree());
   1019   alloc_time_ = alloc_time;
   1020   region_space->AdjustNonFreeRegionLimit(idx_);
   1021   type_ = RegionType::kRegionTypeToSpace;
   1022   if (kProtectClearedRegions) {
   1023     CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
   1024   }
   1025 }
   1026 
   1027 void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
   1028   MarkAsAllocated(region_space, alloc_time);
   1029   state_ = RegionState::kRegionStateAllocated;
   1030 }
   1031 
   1032 void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
   1033   MarkAsAllocated(region_space, alloc_time);
   1034   state_ = RegionState::kRegionStateLarge;
   1035 }
   1036 
   1037 void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
   1038   MarkAsAllocated(region_space, alloc_time);
   1039   state_ = RegionState::kRegionStateLargeTail;
   1040 }
   1041 
   1042 }  // namespace space
   1043 }  // namespace gc
   1044 }  // namespace art
   1045