/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {

namespace accounting {
class ReadBarrierTable;
}  // namespace accounting

namespace space {

// Cyclic region allocation strategy. If `true`, region allocation
// will not try to allocate a new region from the beginning of the
// region space, but from the last allocated region. This allocation
// strategy reduces region reuse and should help catch some GC bugs
// earlier. However, cyclic region allocation can also create memory
// fragmentation at the region level (see b/33795328); therefore, we
// only enable it in debug mode.
static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;
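
// Illustrative sketch (not part of this header's API): with cyclic allocation
// enabled, the region scan conceptually starts at the most recently allocated
// region and wraps around, i.e. an index of the form
// `(cyclic_alloc_region_index_ + i) % num_regions_` rather than starting at 0.
// See RegionSpace::AllocateRegion for the actual implementation.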

// A space that consists of equal-sized regions.
class RegionSpace final : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  enum EvacMode {
    kEvacModeNewlyAllocated,
    kEvacModeLivePercentNewlyAllocated,
    kEvacModeForceAll,
  };

  SpaceType GetType() const override {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
  static RegionSpace* Create(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
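
  // Illustrative usage sketch; the space name and argument values below are
  // placeholders, not prescribed by this header:
  //
  //   MemMap mem_map = RegionSpace::CreateMemMap("region space", capacity, requested_begin);
  //   RegionSpace* space = RegionSpace::Create("region space",
  //                                            std::move(mem_map),
  //                                            /*use_generational_cc=*/true);
  //   if (requested_begin != nullptr && space->Begin() != requested_begin) {
  //     // The requested base address was not granted.
  //   }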

  // Allocate `num_bytes`, returning null if the space is full.
  mirror::Object* Alloc(Thread* self,
                        size_t num_bytes,
                        /* out */ size_t* bytes_allocated,
                        /* out */ size_t* usable_size,
                        /* out */ size_t* bytes_tl_bulk_allocated)
      override REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self,
                                    size_t num_bytes,
                                    /* out */ size_t* bytes_allocated,
                                    /* out */ size_t* usable_size,
                                    /* out */ size_t* bytes_tl_bulk_allocated)
      override REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
                                                /* out */ size_t* bytes_allocated,
                                                /* out */ size_t* usable_size,
                                                /* out */ size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes,
                             /* out */ size_t* bytes_allocated,
                             /* out */ size_t* usable_size,
                             /* out */ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  template<bool kForEvac>
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
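
  // Illustrative call sketch (not a prescribed usage pattern); it only shows
  // how a caller supplies the out-parameters:
  //
  //   size_t bytes_allocated = 0u;
  //   size_t usable_size = 0u;
  //   size_t bytes_tl_bulk_allocated = 0u;
  //   mirror::Object* obj = region_space->Alloc(Thread::Current(),
  //                                             num_bytes,
  //                                             &bytes_allocated,
  //                                             &usable_size,
  //                                             &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     // The region space is full; the caller must handle the failure (e.g. trigger a GC).
  //   }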

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) override {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) override {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
    return mark_bitmap_.get();
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
    return mark_bitmap_.get();
  }

  void Clear() override REQUIRES(!region_lock_);

  // Protect the whole region space, i.e. make memory pages backing the
  // region area not readable and not writable.
  void Protect();

  // Remove memory protection from the whole region space, i.e. make memory
  // pages backing the region area readable and writable. This method is useful
  // to avoid page protection faults when dumping information about an invalid
  // reference.
  void Unprotect();

  // Change the non-growth-limit capacity to `new_capacity` by shrinking or expanding the map.
  // Currently, only shrinking is supported.
  // Unlike implementations of this function in other spaces, we need to pass
  // the new capacity as an argument here, as the region space doesn't have any
  // notion of a growth limit.
  void ClampGrowthLimit(size_t new_capacity) REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const override;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  // Dump region containing object `obj`. Precondition: `obj` is in the region space.
  void DumpRegionForObject(std::ostream& os, mirror::Object* obj) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

  size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers() override
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };

  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() override REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() override REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  size_t GetMaxPeakNumNonFreeRegions() const {
    return max_peak_num_non_free_regions_;
  }
  size_t GetNumRegions() const {
    return num_regions_;
  }

  bool CanMoveObjects() const override {
    return true;
  }

  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() override {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);

  // Scans regions and calls visitor for objects in unevac-space corresponding
  // to the bits set in 'bitmap'.
  // Cannot acquire region_lock_ as visitor may need to acquire it for allocation.
  // Should not be called concurrently with functions (like SetFromSpace()) which
  // change regions' type.
  template <typename Visitor>
  ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
                                         Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
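
  // Illustrative visitor sketch (assumes the visitor is invoked with a
  // `mirror::Object*` argument, as in the definitions in region_space-inl.h):
  //
  //   region_space->Walk([](mirror::Object* obj) {
  //     // Inspect `obj` here; objects are visited in increasing address order
  //     // within each region.
  //   });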

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 256 * KB;
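
  // Worked example (illustrative figures only): with kRegionSize = 256 KiB, a
  // 256 MiB region space is divided into 256 MiB / 256 KiB = 1024 equal-sized
  // regions, and an allocation larger than 256 KiB is served by a "large"
  // region followed by one or more "large tail" regions.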

  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_LT(idx, num_regions_);
    return regions_[idx].IsNewlyAllocated();
  }

  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsNewlyAllocated();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsLargeObject(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsLarge();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  // If `ref` is in the region space, return the type of its region;
  // otherwise, return `RegionType::kRegionTypeNone`.
  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      return GetRegionTypeUnsafe(ref);
    }
    return RegionType::kRegionTypeNone;
  }

  // Unsafe version of RegionSpace::GetRegionType.
  // Precondition: `ref` is in the region space.
  RegionType GetRegionTypeUnsafe(mirror::Object* ref) {
    DCHECK(HasAddress(ref)) << ref;
    Region* r = RefToRegionUnlocked(ref);
    return r->Type();
  }

  // Zero live bytes for a large object, used by young gen CC for marking newly allocated large
  // objects.
  void ZeroLiveBytesForLargeObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine which regions to evacuate and tag them as
  // from-space. Tag the rest as unevacuated from-space.
  void SetFromSpace(accounting::ReadBarrierTable* rb_table,
                    EvacMode evac_mode,
                    bool clear_live_bytes)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
  void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
                      /* out */ uint64_t* cleared_objects,
                      const bool clear_bitmap)
      REQUIRES(!region_lock_);
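
  // Illustrative collection-cycle sketch; the actual sequence and argument
  // choices are driven by the concurrent copying collector, not prescribed here:
  //
  //   // At the flip pause: decide which regions to evacuate.
  //   region_space->SetFromSpace(rb_table,
  //                              kEvacModeLivePercentNewlyAllocated,
  //                              /*clear_live_bytes=*/true);
  //   // ... evacuate live objects out of from-space regions ...
  //   // After evacuation: reclaim from-space regions and flip unevacuated
  //   // from-space regions back to to-space.
  //   uint64_t cleared_bytes = 0u;
  //   uint64_t cleared_objects = 0u;
  //   region_space->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap=*/true);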

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

  void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    const size_t iter_limit = kUseTableLookupReadBarrier
        ? num_regions_
        : std::min(num_regions_, non_free_region_index_limit_);
    for (size_t i = 0; i < iter_limit; ++i) {
      Region* r = &regions_[i];
      // Newly allocated regions don't need up-to-date live_bytes_ for deciding
      // whether to be evacuated or not. See Region::ShouldBeEvacuated().
      if (!r->IsFree() && !r->IsNewlyAllocated()) {
        r->ZeroLiveBytes();
      }
    }
  }

  size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg_idx;
  }
  // Return -1 as region index for references outside this region space.
  size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
    if (HasAddress(ref)) {
      return RegionIdxForRefUnchecked(ref);
    } else {
      return static_cast<size_t>(-1);
    }
  }

  // Increment object allocation count for region containing ref.
  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);

  bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          live_bytes_(static_cast<size_t>(-1)),
          begin_(nullptr),
          thread_(nullptr),
          top_(nullptr),
          end_(nullptr),
          objects_allocated_(0),
          alloc_time_(0),
          is_newly_allocated_(false),
          is_a_tlab_(false),
          state_(RegionState::kRegionStateAllocated),
          type_(RegionType::kRegionTypeToSpace) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.store(begin, std::memory_order_relaxed);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.store(0, std::memory_order_relaxed);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

    void Clear(bool zero_and_release_pages);

    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes,
                                        /* out */ size_t* bytes_allocated,
                                        /* out */ size_t* usable_size,
                                        /* out */ size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = (state_ == RegionState::kRegionStateFree);
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large.
    void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    // Given a free region, declare it non-free (allocated) and large tail.
    void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = (state_ == RegionState::kRegionStateLarge);
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, Top());
      }
      return is_large;
    }

    void ZeroLiveBytes() {
      live_bytes_ = 0;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsNewlyAllocated() const {
      return is_newly_allocated_;
    }

    bool IsTlab() const {
      return is_a_tlab_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    // Set this region as evacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will clear and reclaim
    // the space used by this region, and tag it as unallocated/free.
    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      if (IsNewlyAllocated()) {
        // Clear the "newly allocated" status here, as we do not want the
        // GC to see it when encountering references in the from-space.
        //
        // Invariant: There should be no newly-allocated region in the
        // from-space (when the from-space exists, which is between the calls
        // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
        is_newly_allocated_ = false;
      }
      // Set live bytes to an invalid value, as we have made an
      // evacuation decision (possibly based on the percentage of live
      // bytes).
      live_bytes_ = static_cast<size_t>(-1);
    }

    // Set this region as unevacuated from-space. At the end of the
    // collection, RegionSpace::ClearFromSpace will preserve the space
    // used by this region, and tag it as to-space (see
    // Region::SetUnevacFromSpaceAsToSpace below).
    void SetAsUnevacFromSpace(bool clear_live_bytes);

    // Set this region as to-space. Used by RegionSpace::ClearFromSpace.
    // This is only valid if it is currently an unevac from-space region.
    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

    // Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.
    ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(GetUseGenerationalCC() || IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      // For large allocations, we always consider all bytes in the regions live.
      live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    bool AllAllocatedBytesAreLive() const {
      return LiveBytes() == static_cast<size_t>(Top() - Begin());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    // Returns the number of allocated bytes.  "Bulk allocated" bytes in active TLABs are excluded.
    size_t BytesAllocated() const;

    size_t ObjectsAllocated() const;

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.load(std::memory_order_relaxed);
    }

    void SetTop(uint8_t* new_top) {
      top_.store(new_top, std::memory_order_relaxed);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
      DCHECK_EQ(Top(), end_);
      objects_allocated_.store(num_objects, std::memory_order_relaxed);
      top_.store(begin_ + num_bytes, std::memory_order_relaxed);
      DCHECK_LE(Top(), end_);
    }

    uint64_t GetLongestConsecutiveFreeBytes() const;

   private:
    static bool GetUseGenerationalCC();

    size_t idx_;                        // The region's index in the region space.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    uint8_t* begin_;                    // The begin address of the region.
    Thread* thread_;                    // The owning thread if it's a tlab.
    // Note that `top_` can be higher than `end_` in the case of a
    // large region, where an allocated object spans multiple regions
    // (large region + one or more large tail regions).
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    // objects_allocated_ is accessed using memory_order_relaxed. Treat as approximate when there
    // are concurrent updates.
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    // Note that newly allocated and evacuated regions use -1 as a
    // special value for `live_bytes_`.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).

    friend class RegionSpace;
  };

  template<bool kToSpaceOnly, typename Visitor>
  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;

  // Visitor will be iterating on objects in increasing address order.
  template<typename Visitor>
  ALWAYS_INLINE void WalkNonLargeRegion(Visitor&& visitor, const Region* r)
      NO_THREAD_SAFETY_ANALYSIS;

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For a performance reason (this is frequently called via
    // RegionSpace::IsInFromSpace, etc.) we avoid taking a lock here.
    // Note that since we only change a region from to-space to (evac)
    // from-space during a pause (in RegionSpace::SetFromSpace) and
    // from (evac) from-space to free (after GC is done), as long as
    // `ref` is a valid reference into an allocated region, it's safe
    // to access the region state without the lock.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

  // Return the object location following `obj` in the region space
  // (i.e., the object location at `obj + obj->SizeOf()`).
  //
  // Note that unless
  // - the region containing `obj` is fully used; and
  // - `obj` is not the last object of that region;
  // the returned location is not guaranteed to be a valid object.
  static mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
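
  // Worked example (illustrative): in a fully packed region, if `obj` starts at
  // offset 0 and `obj->SizeOf()` rounds up to 24 bytes under `kAlignment`, the
  // next object starts 24 bytes later; in a partially filled region the same
  // computation may instead yield an address in unallocated memory.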

  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
    DCHECK_LT(new_non_free_region_index, num_regions_);
    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
                                            new_non_free_region_index + 1);
    VerifyNonFreeRegionLimit();
  }

  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
    non_free_region_index_limit_ = new_non_free_region_index_limit;
    VerifyNonFreeRegionLimit();
  }

  // Verify the following invariant (in debug builds):
  // for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
        CHECK(regions_[i].IsFree());
      }
    }
  }

  Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);

  // Scan region range [`begin`, `end`) in increasing order to try to
  // allocate a large region having a size of `num_regs_in_large_region`
  // regions. If there is no space in the region space to allocate this
  // large region, return null.
  //
  // If argument `next_region` is not null, use `*next_region` to
  // return the index of the region following the allocated large region
  // returned by this method.
  template<bool kForEvac>
  mirror::Object* AllocLargeInRange(size_t begin,
                                    size_t end,
                                    size_t num_regs_in_large_region,
                                    /* out */ size_t* bytes_allocated,
                                    /* out */ size_t* usable_size,
                                    /* out */ size_t* bytes_tl_bulk_allocated,
                                    /* out */ size_t* next_region = nullptr) REQUIRES(region_lock_);
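
  // Worked example (illustrative figures only): a 600 KiB allocation needs
  // RoundUp(600 KiB, kRegionSize) / kRegionSize = 3 contiguous regions: one
  // tagged kRegionStateLarge followed by two tagged kRegionStateLargeTail.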

  // Check that the value of `r->LiveBytes()` matches the number of
  // (allocated) bytes used by live objects according to the live bits
  // in the region space bitmap range corresponding to region `r`.
  void CheckLiveBytesAgainstRegionBitmap(Region* r);

  // Poison memory areas used by dead objects within unevacuated
  // region `r`. This is meant to detect dangling references to dead
  // objects earlier in debug mode.
  void PoisonDeadObjectsInUnevacuatedRegion(Region* r);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Cached version of Heap::use_generational_cc_.
  const bool use_generational_cc_;
  uint32_t time_;                  // The time, measured as the number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  // The number of non-free regions in this space.
  size_t num_non_free_regions_ GUARDED_BY(region_lock_);

  // The number of evac regions allocated during collection. 0 when GC not running.
  size_t num_evac_regions_ GUARDED_BY(region_lock_);

  // The maximum number of non-free regions observed just before reclaim in each
  // GC cycle, which is the point in the cycle where the number of non-free
  // regions is at its highest.
  size_t max_peak_num_non_free_regions_;

  // The pointer to the region array.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);

  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
  // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
  //
  // Invariant (verified by RegionSpace::VerifyNonFreeRegionLimit):
  //   for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);

  Region* current_region_;         // The region currently used for allocation.
  Region* evac_region_;            // The region currently used for evacuation.
  Region full_region_;             // The dummy/sentinel region that looks full.

  // Index into the region array pointing to the starting region when
  // trying to allocate a new region. Only used when
  // `kCyclicRegionAllocation` is true.
  size_t cyclic_alloc_region_index_ GUARDED_BY(region_lock_);

  // Mark bitmap used by the GC.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_