Home | History | Annotate | Download | only in space
      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
     18 #define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
     19 
     20 #include "base/allocator.h"
     21 #include "dlmalloc_space.h"
     22 #include "safe_map.h"
     23 #include "space.h"
     24 
     25 #include <set>
     26 #include <vector>
     27 
     28 namespace art {
     29 namespace gc {
     30 namespace space {
     31 
     32 class AllocationInfo;
     33 
// Selects which large object space implementation the heap uses.
enum class LargeObjectSpaceType {
  kDisabled,  // No dedicated large object space.
  kMap,       // LargeObjectMapSpace: one memory map per large object.
  kFreeList,  // FreeListSpace: one contiguous mapping with a free list for holes.
};
     39 
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeLargeObjectSpace;
  }
  // Swap the live and mark bitmaps of the space.
  void SwapBitmaps();
  // Copy the live bitmap into the mark bitmap.
  void CopyLiveToMarked();
  // Visit each large object in the space; callback semantics are defined by the
  // concrete subclass.
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  // Approximate number of bytes currently allocated in the space.
  uint64_t GetBytesAllocated() OVERRIDE {
    return num_bytes_allocated_;
  }
  // Approximate number of objects currently allocated in the space.
  uint64_t GetObjectsAllocated() OVERRIDE {
    return num_objects_allocated_;
  }
  // Total bytes allocated over the lifetime of the space.
  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }
  // Total objects allocated over the lifetime of the space.
  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }
  // Bulk-free num_ptrs objects (see AllocSpace::FreeList for the contract).
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
    return 0U;
  }
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }
  // Sweep the space; swap_bitmaps indicates whether the live/mark bitmaps have
  // already been swapped by the collector.
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  // Large objects are never moved by the GC.
  virtual bool CanMoveObjects() const OVERRIDE {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of space
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space, mark all existing large objects as zygote large
  // objects.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;

  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
  // End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
  // Frees a batch of objects; presumably used as the sweep callback — see the
  // .cc file for how it is wired up.
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Approximate number of bytes which have been allocated into the space.
  uint64_t num_bytes_allocated_;
  // Approximate number of objects which have been allocated into the space.
  uint64_t num_objects_allocated_;
  // Lifetime totals backing GetTotalBytesAllocated()/GetTotalObjectsAllocated().
  uint64_t total_bytes_allocated_;
  uint64_t total_objects_allocated_;
  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};
    129 
    130 // A discontinuous large object space implemented by individual mmap/munmap calls.
    131 class LargeObjectMapSpace : public LargeObjectSpace {
    132  public:
    133   // Creates a large object space. Allocations into the large object space use memory maps instead
    134   // of malloc.
    135   static LargeObjectMapSpace* Create(const std::string& name);
    136   // Return the storage space required by obj.
    137   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
    138   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
    139                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
    140       REQUIRES(!lock_);
    141   size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
    142   void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
    143   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
    144   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
    145 
    146   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
    147 
    148  protected:
    149   struct LargeObject {
    150     MemMap* mem_map;
    151     bool is_zygote;
    152   };
    153   explicit LargeObjectMapSpace(const std::string& name);
    154   virtual ~LargeObjectMapSpace() {}
    155 
    156   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
    157   void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
    158 
    159   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
    160   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
    161   AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
    162       GUARDED_BY(lock_);
    163 };
    164 
    165 // A continuous large object space with a free-list to handle holes.
    166 class FreeListSpace FINAL : public LargeObjectSpace {
    167  public:
    168   static constexpr size_t kAlignment = kPageSize;
    169 
    170   virtual ~FreeListSpace();
    171   static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
    172   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
    173       REQUIRES(lock_);
    174   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
    175                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
    176       OVERRIDE REQUIRES(!lock_);
    177   size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
    178   void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
    179   void Dump(std::ostream& os) const REQUIRES(!lock_);
    180 
    181   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
    182 
    183  protected:
    184   FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
    185   size_t GetSlotIndexForAddress(uintptr_t address) const {
    186     DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    187     return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
    188   }
    189   size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
    190   AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
    191   const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
    192   uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    193     return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
    194   }
    195   uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    196     return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
    197   }
    198   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
    199   void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
    200   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
    201   void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);
    202 
    203   class SortByPrevFree {
    204    public:
    205     bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
    206   };
    207   typedef std::set<AllocationInfo*, SortByPrevFree,
    208                    TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;
    209 
    210   // There is not footer for any allocations at the end of the space, so we keep track of how much
    211   // free space there is at the end manually.
    212   std::unique_ptr<MemMap> mem_map_;
    213   // Side table for allocation info, one per page.
    214   std::unique_ptr<MemMap> allocation_info_map_;
    215   AllocationInfo* allocation_info_;
    216 
    217   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
    218   // Free bytes at the end of the space.
    219   size_t free_end_ GUARDED_BY(lock_);
    220   FreeBlocks free_blocks_ GUARDED_BY(lock_);
    221 };
    222 
    223 }  // namespace space
    224 }  // namespace gc
    225 }  // namespace art
    226 
    227 #endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
    228