/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "space.h"
#include "thread.h"

namespace art {
namespace gc {

namespace accounting {
class ReadBarrierTable;
}  // namespace accounting

namespace space {

// A space that consists of equal-sized regions.
class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
 public:
  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeRegionSpace;
  }

  // Create a region space mem map with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm that the request was granted.
  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
  static RegionSpace* Create(const std::string& name, MemMap* mem_map);

  // Allocate num_bytes; returns null if the space is full.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!region_lock_);
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
  // The main allocation routine.
  template<bool kForEvac>
  ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                size_t* usable_size,
                                                size_t* bytes_tl_bulk_allocated)
      REQUIRES(!region_lock_);
  // Allocate/free large objects (objects that are larger than the region size).
  template<bool kForEvac>
  mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
                             size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
  void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  size_t Free(Thread*, mirror::Object*) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
  size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE {
    UNIMPLEMENTED(FATAL);
    return 0;
  }
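  // Note: the region space maintains a single bitmap; both the live and the mark
  // bitmap accessors below return the same mark_bitmap_.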
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

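  // Reset every region to the free state, zeroing and releasing its pages.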
  void Clear() OVERRIDE REQUIRES(!region_lock_);

  void Dump(std::ostream& os) const;
  void DumpRegions(std::ostream& os) REQUIRES(!region_lock_);
  void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);

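  // Revocation of thread-local allocation buffers, plus debug-build assertions
  // that the revocation has actually happened.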
  size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_);
  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
  size_t RevokeAllThreadLocalBuffers()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
  void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
  void AssertAllThreadLocalBuffersAreRevoked()
      REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);

  enum class RegionType : uint8_t {
    kRegionTypeAll,              // All types.
    kRegionTypeFromSpace,        // From-space. To be evacuated.
    kRegionTypeUnevacFromSpace,  // Unevacuated from-space. Not to be evacuated.
    kRegionTypeToSpace,          // To-space.
    kRegionTypeNone,             // None.
  };

  enum class RegionState : uint8_t {
    kRegionStateFree,            // Free region.
    kRegionStateAllocated,       // Allocated region.
    kRegionStateLarge,           // Large allocated (allocation larger than the region size).
    kRegionStateLargeTail,       // Large tail (non-first regions of a large allocation).
  };

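  // Allocation accounting: the Internal variants count only regions of the given type.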
  template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_);
  template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_);
  uint64_t GetBytesAllocated() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>();
  }
  uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>();
  }
  uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }
  uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
    return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
  }

  bool CanMoveObjects() const OVERRIDE {
    return true;
  }

  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return byte_obj >= Begin() && byte_obj < Limit();
  }

  RegionSpace* AsRegionSpace() OVERRIDE {
    return this;
  }

  // Go through all of the blocks and visit the continuous objects.
  template <typename Visitor>
  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
    WalkInternal<false /* kToSpaceOnly */>(visitor);
  }
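  // Like Walk(), but only visits objects in to-space regions.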
  template <typename Visitor>
  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_) {
    WalkInternal<true>(visitor);
  }

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
    return nullptr;
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);

  // Object alignment within the space.
  static constexpr size_t kAlignment = kObjectAlignment;
  // The region size.
  static constexpr size_t kRegionSize = 256 * KB;

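  // Lock-free queries of the region containing ref. See RefToRegionUnlocked() for
  // why it is safe to read the region's state/type without holding region_lock_.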
  bool IsInFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInFromSpace();
    }
    return false;
  }

  bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsNewlyAllocated();
    }
    return false;
  }

  bool IsInUnevacFromSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInUnevacFromSpace();
    }
    return false;
  }

  bool IsInToSpace(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->IsInToSpace();
    }
    return false;
  }

  RegionType GetRegionType(mirror::Object* ref) {
    if (HasAddress(ref)) {
      Region* r = RefToRegionUnlocked(ref);
      return r->Type();
    }
    return RegionType::kRegionTypeNone;
  }

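  // Used by the concurrent copying collector at the start of a collection pause:
  // current to-space regions become from-space (to be evacuated) or unevacuated
  // from-space, depending on force_evacuate_all and a per-region heuristic.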
  void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all)
      REQUIRES(!region_lock_);

  size_t FromSpaceSize() REQUIRES(!region_lock_);
  size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
  size_t ToSpaceSize() REQUIRES(!region_lock_);
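  // Reclaim the from-space regions once evacuation is complete; the freed byte and
  // object counts are returned through the out parameters.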
  void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);

  void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
    Region* reg = RefToRegionUnlocked(ref);
    reg->AddLiveBytes(alloc_size);
  }

  void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_) {
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), region_lock_);
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        size_t live_bytes = r->LiveBytes();
        CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
      }
    }
  }

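  // Update the allocation accounting of the region containing ref.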
  void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
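  // Try to set up a new thread-local allocation buffer of at least min_bytes for
  // self; returns false if no suitable region is available.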
  bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);

  uint32_t Time() {
    return time_;
  }

 private:
  RegionSpace(const std::string& name, MemMap* mem_map);

  template<bool kToSpaceOnly, typename Visitor>
  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;

  class Region {
   public:
    Region()
        : idx_(static_cast<size_t>(-1)),
          begin_(nullptr), top_(nullptr), end_(nullptr),
          state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
          objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
          is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}

    void Init(size_t idx, uint8_t* begin, uint8_t* end) {
      idx_ = idx;
      begin_ = begin;
      top_.StoreRelaxed(begin);
      end_ = end;
      state_ = RegionState::kRegionStateFree;
      type_ = RegionType::kRegionTypeNone;
      objects_allocated_.StoreRelaxed(0);
      alloc_time_ = 0;
      live_bytes_ = static_cast<size_t>(-1);
      is_newly_allocated_ = false;
      is_a_tlab_ = false;
      thread_ = nullptr;
      DCHECK_LT(begin, end);
      DCHECK_EQ(static_cast<size_t>(end - begin), kRegionSize);
    }

    RegionState State() const {
      return state_;
    }

    RegionType Type() const {
      return type_;
    }

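    // Return the region to the free state; optionally zero its memory and release
    // the underlying pages (zero_and_release_pages).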
    void Clear(bool zero_and_release_pages);

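    // Bump-pointer allocation of num_bytes within this region; returns null when
    // there is not enough space left.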
    ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated);

    bool IsFree() const {
      bool is_free = state_ == RegionState::kRegionStateFree;
      if (is_free) {
        DCHECK(IsInNoSpace());
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      }
      return is_free;
    }

    // Given a free region, declare it non-free (allocated).
    void Unfree(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time)
        REQUIRES(region_space->region_lock_);

    void SetNewlyAllocated() {
      is_newly_allocated_ = true;
    }

    // Non-large, non-large-tail allocated.
    bool IsAllocated() const {
      return state_ == RegionState::kRegionStateAllocated;
    }

    // Large allocated.
    bool IsLarge() const {
      bool is_large = state_ == RegionState::kRegionStateLarge;
      if (is_large) {
        DCHECK_LT(begin_ + kRegionSize, Top());
      }
      return is_large;
    }

    // Large-tail allocated.
    bool IsLargeTail() const {
      bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
      if (is_large_tail) {
        DCHECK_EQ(begin_, Top());
      }
      return is_large_tail;
    }

    size_t Idx() const {
      return idx_;
    }

    bool IsNewlyAllocated() const {
      return is_newly_allocated_;
    }

    bool IsInFromSpace() const {
      return type_ == RegionType::kRegionTypeFromSpace;
    }

    bool IsInToSpace() const {
      return type_ == RegionType::kRegionTypeToSpace;
    }

    bool IsInUnevacFromSpace() const {
      return type_ == RegionType::kRegionTypeUnevacFromSpace;
    }

    bool IsInNoSpace() const {
      return type_ == RegionType::kRegionTypeNone;
    }

    void SetAsFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeFromSpace;
      live_bytes_ = static_cast<size_t>(-1);
    }

    void SetAsUnevacFromSpace() {
      DCHECK(!IsFree() && IsInToSpace());
      type_ = RegionType::kRegionTypeUnevacFromSpace;
      live_bytes_ = 0U;
    }

    void SetUnevacFromSpaceAsToSpace() {
      DCHECK(!IsFree() && IsInUnevacFromSpace());
      type_ = RegionType::kRegionTypeToSpace;
    }

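    // Heuristic used when flipping to from-space: whether this region should be
    // evacuated rather than kept as unevacuated from-space (for example, a newly
    // allocated or mostly-dead region); see the definition for the exact policy.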
    ALWAYS_INLINE bool ShouldBeEvacuated();

    void AddLiveBytes(size_t live_bytes) {
      DCHECK(IsInUnevacFromSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      // For large allocations, we always consider all bytes in the
      // regions live.
      live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
      DCHECK_LE(live_bytes_, BytesAllocated());
    }

    bool AllAllocatedBytesAreLive() const {
      return LiveBytes() == static_cast<size_t>(Top() - Begin());
    }

    size_t LiveBytes() const {
      return live_bytes_;
    }

    size_t BytesAllocated() const;

    size_t ObjectsAllocated() const {
      if (IsLarge()) {
        DCHECK_LT(begin_ + kRegionSize, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 1;
      } else if (IsLargeTail()) {
        DCHECK_EQ(begin_, Top());
        DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
        return 0;
      } else {
        DCHECK(IsAllocated()) << static_cast<uint>(state_);
        return objects_allocated_;
      }
    }

    uint8_t* Begin() const {
      return begin_;
    }

    ALWAYS_INLINE uint8_t* Top() const {
      return top_.LoadRelaxed();
    }

    void SetTop(uint8_t* new_top) {
      top_.StoreRelaxed(new_top);
    }

    uint8_t* End() const {
      return end_;
    }

    bool Contains(mirror::Object* ref) const {
      return begin_ <= reinterpret_cast<uint8_t*>(ref) && reinterpret_cast<uint8_t*>(ref) < end_;
    }

    void Dump(std::ostream& os) const;

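    // Record the allocations made through a thread-local buffer carved out of this
    // region: num_objects objects occupying the first num_bytes of the region.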
    void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
      DCHECK(IsAllocated());
      DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
      DCHECK_EQ(Top(), end_);
      objects_allocated_.StoreRelaxed(num_objects);
      top_.StoreRelaxed(begin_ + num_bytes);
      DCHECK_LE(Top(), end_);
    }

   private:
    size_t idx_;                        // The region's index in the region space.
    uint8_t* begin_;                    // The begin address of the region.
    Atomic<uint8_t*> top_;              // The current position of the allocation.
    uint8_t* end_;                      // The end address of the region.
    RegionState state_;                 // The region state (see RegionState).
    RegionType type_;                   // The region type (see RegionType).
    Atomic<size_t> objects_allocated_;  // The number of objects allocated.
    uint32_t alloc_time_;               // The allocation time of the region.
    size_t live_bytes_;                 // The live bytes. Used to compute the live percent.
    bool is_newly_allocated_;           // True if it's allocated after the last collection.
    bool is_a_tlab_;                    // True if it's a tlab.
    Thread* thread_;                    // The owning thread if it's a tlab.

    friend class RegionSpace;
  };

  Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) {
    MutexLock mu(Thread::Current(), region_lock_);
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
    // For performance reasons (this is frequently called via
    // IsInFromSpace() etc.) we avoid taking a lock here. Note that
    // since a region only changes from to-space to from-space during
    // a pause (in SetFromSpace()) and from from-space to free after
    // the GC is done, it is safe to access the region state without
    // the lock as long as ref is a valid reference into an allocated
    // region.
    return RefToRegionLocked(ref);
  }

  Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) {
    DCHECK(HasAddress(ref));
    uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
    size_t reg_idx = offset / kRegionSize;
    DCHECK_LT(reg_idx, num_regions_);
    Region* reg = &regions_[reg_idx];
    DCHECK_EQ(reg->Idx(), reg_idx);
    DCHECK(reg->Contains(ref));
    return reg;
  }

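  // Return the address of the object that immediately follows obj, taking the
  // object's size and kAlignment into account.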
  mirror::Object* GetNextObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
    DCHECK_LT(new_non_free_region_index, num_regions_);
    non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
                                            new_non_free_region_index + 1);
    VerifyNonFreeRegionLimit();
  }

  void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
    DCHECK_LE(new_non_free_region_index_limit, num_regions_);
    non_free_region_index_limit_ = new_non_free_region_index_limit;
    VerifyNonFreeRegionLimit();
  }

  void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
    if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
      for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
        CHECK(regions_[i].IsFree());
      }
    }
  }

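  // Find a free region, mark it as allocated, and return it; returns null when the
  // space is full. for_evac distinguishes evacuation-time allocations from mutator
  // allocations for accounting purposes.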
  Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);

  Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  uint32_t time_;                  // The time, measured in number of collections since startup.
  size_t num_regions_;             // The number of regions in this space.
  size_t num_non_free_regions_;    // The number of non-free regions in this space.
  std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
                                   // The pointer to the region array.
  // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
  // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
  // true.
  size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
  Region* current_region_;         // The region that is currently being allocated into.
  Region* evac_region_;            // The region that is currently being evacuated into.
  Region full_region_;             // The dummy/sentinel region that looks full.

  // Mark bitmap used by the GC.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;

  DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};

std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionState& value);
std::ostream& operator<<(std::ostream& os, const RegionSpace::RegionType& value);

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_H_