/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_

#include <memory>
#include <string>

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/collector/garbage_collector.h"
#include "globals.h"
#include "image.h"
#include "mem_map.h"

namespace art {
namespace mirror {
  class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace space {

class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class RegionSpace;
class ZygoteSpace;

static constexpr bool kDebugSpaces = kIsDebugBuild;

// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan spaces with this policy, such as the Zygote space.
  kGcRetentionPolicyFullCollect,
};
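// For example (illustrative, see the concrete subclasses for the actual choices): an image
// space is typically kGcRetentionPolicyNeverCollect, a regular alloc space
// kGcRetentionPolicyAlwaysCollect, and the Zygote space kGcRetentionPolicyFullCollect.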
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);

enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
  kSpaceTypeRegionSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);

// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy for when objects associated with this space are collected.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, malloc, zygote, bump pointer, large object or region.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory-mapped image file?
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();

  // Is this a malloc-backed allocation space (dlmalloc or rosalloc)?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  bool IsRegionSpace() const {
    return GetType() == kSpaceTypeRegionSpace;
  }
  virtual RegionSpace* AsRegionSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

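  // Illustrative use of the Is*/As* pairs for checked downcasts (hypothetical caller, not
  // part of this interface):
  //   if (space->IsImageSpace()) {
  //     ImageSpace* image_space = space->AsImageSpace();
  //     ...
  //   }
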
  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);

// AllocSpace interface.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation succeeds, the output
  // parameter bytes_allocated is set to the number of bytes actually allocated, which is
  // >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  //
  // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread-local
  // allocation, if applicable. It can be
  // 1) equal to bytes_allocated if it's not a thread-local allocation,
  // 2) greater than bytes_allocated if it's a thread-local allocation that required a
  //    new buffer, or
  // 3) zero if it's a thread-local allocation in an existing buffer.
  // This is what is to be added to Heap::num_bytes_allocated_.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
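  // Illustrative numbers (hypothetical, not part of this interface): a 24-byte request
  // that opens a fresh 4 KB thread-local buffer could report bytes_allocated = 24 and
  // bytes_tl_bulk_allocated = 4096 (case 2); the next 24-byte request served from that
  // same buffer could report bytes_allocated = 24 and bytes_tl_bulk_allocated = 0
  // (case 3), so the bulk bytes are only counted once.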

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
      REQUIRES(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeAllThreadLocalBuffers() = 0;

  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};

// Continuous spaces have bitmaps and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  uint8_t* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_.LoadRelaxed();
  }

  // The end of the address range covered by the space.
  uint8_t* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(uint8_t* end) {
    end_.StoreRelaxed(end);
  }

  void SetLimit(uint8_t* limit) {
    limit_ = limit;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum size to which the mapped space can grow.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }

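  // Illustrative layout (hypothetical addresses, not taken from any real space): with
  // Begin() == 0x1000, End() == 0x3000 and Limit() == 0x9000, Size() is 0x2000 bytes
  // currently in use and Capacity() is 0x8000 bytes the space may grow into.
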
  // Is the given object within this space? An object is contained if its address lies in
  // [Begin(), Limit()).
  bool HasAddress(const mirror::Object* obj) const {
    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return HasAddress(obj);
  }

  virtual bool IsContinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  uint8_t* begin_;

  // Current end of the space.
  Atomic<uint8_t*> end_;

  // Limit of the space.
  uint8_t* limit_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};

// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects, so the write barrier shouldn't be triggered. This
// is suitable for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  virtual bool IsDiscontinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};

class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

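  // Illustrative numbers (hypothetical, not a contract): a main alloc space might reserve
  // 256 MB, so NonGrowthLimitCapacity() returns 256 MB, while an initial growth limit keeps
  // Capacity() at, say, 16 MB until the heap is allowed to extend.
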
  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space.
  std::unique_ptr<MemMap> mem_map_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};

// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() OVERRIDE {
    return this;
  }

  bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
  void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();
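  // A sketch of why the swap helps (an assumption from general ART GC behavior, not a
  // contract of this header): after marking, the mark bitmap holds the newly computed live
  // set; swapping makes it the live bitmap, so mutators proceed against it while the
  // sweeper frees objects that appear only in the old (pre-swap) live bitmap.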

  // Clear the space back to an empty state.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return live_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_