/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_

#include <memory>
#include <string>

#include "base/atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/collector/object_byte_pair.h"
#include "globals.h"
#include "mem_map.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace space {

class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class RegionSpace;
class ZygoteSpace;

static constexpr bool kDebugSpaces = kIsDebugBuild;

// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas, such as the Zygote space.
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
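
// For example (noted here for orientation; the policies are assigned elsewhere
// in the runtime): image spaces are typically never collected, the main
// allocation space is always collected, and the Zygote space is only examined
// by full collections.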

enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
  kSpaceTypeRegionSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);

// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump the space. Also the key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy determining when objects associated with this space are collected.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, malloc, zygote, bump pointer, large object, or region.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory-mapped image file?
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();

  // Is this a malloc-backed allocation space?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  bool IsRegionSpace() const {
    return GetType() == kSpaceTypeRegionSpace;
  }
  virtual RegionSpace* AsRegionSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

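  // Illustrative sketch (a hypothetical caller, not part of this API): the
  // Is*/As* pairs above act as lightweight RTTI, and the expected pattern is
  // to test before downcasting:
  //
  //   void VisitSpace(Space* space) {
  //     if (space->IsImageSpace()) {
  //       ImageSpace* image_space = space->AsImageSpace();
  //       // ... image-specific handling ...
  //     } else if (space->IsContinuousSpace()) {
  //       ContinuousSpace* continuous = space->AsContinuousSpace();
  //       // ... use continuous->Begin() and continuous->End() ...
  //     }
  //   }
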
  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
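
// Illustrative usage note: with the stream operator above (which presumably
// forwards to Space::Dump), diagnostics can print a space directly, e.g.
// LOG(INFO) << *space;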

// AllocSpace interface.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation succeeds, the output parameter
  // bytes_allocated will be set to the number of bytes actually allocated, which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  //
  // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread-local allocation,
  // if applicable. It can be
  // 1) equal to bytes_allocated if it's not a thread-local allocation,
  // 2) greater than bytes_allocated if it's a thread-local allocation that required a new buffer,
  //    or
  // 3) zero if it's a thread-local allocation in an existing buffer.
  // This is what is to be added to Heap::num_bytes_allocated_.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;

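  // Illustrative sketch (a hypothetical caller with made-up sizes; the
  // accounting follows the comment above):
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = alloc_space->Alloc(
  //       self, 64, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj != nullptr) {
  //     // bytes_allocated >= 64. For a thread-local allocation served from an
  //     // existing buffer, bytes_tl_bulk_allocated is 0: those bytes were
  //     // already charged when the buffer itself was bulk-allocated.
  //     heap_bytes_allocated += bytes_tl_bulk_allocated;  // hypothetical counter
  //   }
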
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
      REQUIRES(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeAllThreadLocalBuffers() = 0;
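
  // Illustrative sketch (hypothetical caller): after revoking, the heap can
  // correct its byte count:
  //
  //   size_t reclaimed = alloc_space->RevokeAllThreadLocalBuffers();
  //   heap_bytes_allocated -= reclaimed;  // hypothetical counter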

  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};

// Continuous spaces have bitmaps and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  uint8_t* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_.LoadRelaxed();
  }

  // The end of the address range covered by the space.
  uint8_t* Limit() const {
    return limit_;
  }

  // Change the end of the space. Use with care, since setting the end of a space to an invalid
  // value may break the GC.
  void SetEnd(uint8_t* end) {
    end_.StoreRelaxed(end);
  }
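
  // Illustrative sketch (hypothetical caller): growing the space by advancing
  // its end, which must stay within [Begin(), Limit()]:
  //
  //   uint8_t* old_end = space->End();
  //   space->SetEnd(old_end + delta);  // delta chosen so that End() <= Limit()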

  void SetLimit(uint8_t* limit) {
    limit_ = limit;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum size to which the mapped space can grow.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }
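
  // Illustrative example with made-up numbers: if Begin() is B, End() is
  // B + 1 MB, and Limit() is B + 16 MB, then Size() is 1 MB (bytes currently
  // in use) while Capacity() is 16 MB (how far the space may grow).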

  // Is the given object within this space? This is a simple range test against
  // [Begin(), Limit()).
  bool HasAddress(const mirror::Object* obj) const {
    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  bool Contains(const mirror::Object* obj) const {
    return HasAddress(obj);
  }

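  // Illustrative example (made-up addresses): with Begin() == 0x70000000 and
  // Limit() == 0x80000000,
  //
  //   HasAddress(reinterpret_cast<mirror::Object*>(0x70001000));  // true
  //   HasAddress(reinterpret_cast<mirror::Object*>(0x80000000));  // false; Limit() is exclusive
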
  virtual bool IsContinuousSpace() const {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  uint8_t* begin_;

  // Current end of the space.
  Atomic<uint8_t*> end_;

  // Limit of the space.
  uint8_t* limit_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};

// A space where objects may be allocated at arbitrary addresses throughout virtual memory.
// Currently the card table can't cover these objects, so the write barrier shouldn't be
// triggered. This makes it suitable for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  virtual bool IsDiscontinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};

class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just Capacity(), but
  // for an allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }
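
  // Illustrative example (made-up sizes): a space created with a 64 MB growth
  // limit inside a 512 MB reservation would report Capacity() == 64 MB but
  // NonGrowthLimitCapacity() == 512 MB.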

  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  MemMapSpace(const std::string& name,
              MemMap* mem_map,
              uint8_t* begin,
              uint8_t* end,
              uint8_t* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space.
  std::unique_ptr<MemMap> mem_map_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};

// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
    return this;
  }

  bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
  // Make the mark bitmap an alias of the live bitmap. Save the current mark bitmap into
  // `temp_bitmap_`, so that we can restore it later in ContinuousMemMapAllocSpace::UnBindBitmaps.
  void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
  // Unalias the mark bitmap from the live bitmap and restore the old mark bitmap.
  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();
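
  // Illustrative sketch (a simplification, not a collector's exact flow):
  // Sweep() below frees every object present in the live bitmap but absent
  // from the mark bitmap, i.e. everything marking did not reach:
  //
  //   collector::ObjectBytePair freed = space->Sweep(/*swap_bitmaps=*/ false);
  //   // freed carries the reclaimed object count and byte total.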

  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
    return live_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
    return mark_bitmap_.get();
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_