/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <iosfwd>

#include "base/memory_tool.h"
#include "base/mutex.h"

namespace art {
namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

class ZygoteSpace;

// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void (*WalkCallback)(void* start, void* end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const override {
    return kSpaceTypeMallocSpace;
  }

  // Allocates num_bytes, allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated, size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) = 0;
  // Allocates num_bytes without allowing the underlying space to grow.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override = 0;
  // Returns the storage space required by obj. If usable_size isn't null, it is set to the
  // amount of the storage space that may be used by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override = 0;
  size_t Free(Thread* self, mirror::Object* ptr) override
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Returns the maximum number of bytes that could be allocated for the given
  // size in bulk, that is, the maximum value of the
  // bytes_tl_bulk_allocated out param returned by MallocSpace::Alloc().
  virtual size_t MaxBytesBulkAllocatedFor(size_t num_bytes) = 0;

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // To be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // No-op in the non-debug build.
#endif

  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Performs an mspace_inspect_all, which calls back for each allocation chunk. The chunk may not
  // be in use, which is indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;
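
  // Illustrative sketch (editorial, not part of the original header): a WalkCallback that tallies
  // the bytes currently in use via a hypothetical MallocSpace* named space. Chunks that are not in
  // use are reported with num_bytes == 0, so they contribute nothing to the sum.
  //
  //   static void SumInUseBytes(void* /*start*/, void* /*end*/, size_t num_bytes, void* arg) {
  //     *reinterpret_cast<size_t*>(arg) += num_bytes;
  //   }
  //   ...
  //   size_t in_use_bytes = 0;
  //   space->Walk(SumInUseBytes, &in_use_bytes);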
     81 
     82   // Returns the number of bytes that the space has currently obtained from the system. This is
     83   // greater or equal to the amount of live data in the space.
     84   virtual size_t GetFootprint() = 0;
     85 
     86   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
     87   virtual size_t GetFootprintLimit() = 0;
     88 
     89   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
     90   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
     91   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
     92   virtual void SetFootprintLimit(size_t limit) = 0;
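
  // Editorial sketch (hypothetical, not necessarily how art::gc::Heap drives this API): a caller
  // that still cannot satisfy an allocation after a GC might raise the footprint limit up to
  // Capacity() and retry with the growing variant of Alloc:
  //
  //   space->SetFootprintLimit(space->Capacity());
  //   mirror::Object* obj = space->AllocWithGrowth(self, num_bytes, &bytes_allocated,
  //                                                &usable_size, &bytes_tl_bulk_allocated);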

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to the
  // maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Overrides Capacity() so that we only return the possibly limited capacity.
  size_t Capacity() const override {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const override {
    return GetMemMap()->Size();
  }

  // Changes the non-growth-limit capacity by shrinking or expanding the map. Currently, only
  // shrinking is supported.
  void ClampGrowthLimit();

  void Dump(std::ostream& os) const override;

  void SetGrowthLimit(size_t growth_limit);

  virtual MallocSpace* CreateInstance(MemMap&& mem_map,
                                      const std::string& name,
                                      void* allocator,
                                      uint8_t* begin,
                                      uint8_t* end,
                                      uint8_t* limit,
                                      size_t growth_limit,
                                      bool can_move_objects) = 0;

  // Splits this space into a zygote space and a new malloc space containing our unused memory.
  // When true, the low_memory_mode argument specifies that the heap wishes the created space to be
  // more aggressive in releasing unused pages. Invalidates the space it is called on.
  ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
                                 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
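
  // Illustrative usage sketch (hypothetical names, editorial): after the split the original space
  // must not be used again; callers switch to the returned malloc space.
  //
  //   MallocSpace* new_space = nullptr;
  //   ZygoteSpace* zygote = old_space->CreateZygoteSpace("alloc space", /*low_memory_mode=*/false,
  //                                                      &new_space);
  //   // old_space is now invalid; allocate from new_space instead.
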
  uint64_t GetBytesAllocated() override = 0;
  uint64_t GetObjectsAllocated() override = 0;

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  bool CanMoveObjects() const override {
    return can_move_objects_;
  }

  void DisableMovingObjects() {
    can_move_objects_ = false;
  }

 protected:
  MallocSpace(const std::string& name,
              MemMap&& mem_map,
              uint8_t* begin,
              uint8_t* end,
              uint8_t* limit,
              size_t growth_limit,
              bool create_bitmaps,
              bool can_move_objects,
              size_t starting_size,
              size_t initial_size);

  static MemMap CreateMemMap(const std::string& name,
                             size_t starting_size,
                             size_t* initial_size,
                             size_t* growth_limit,
                             size_t* capacity);

  // When true, the low_memory_mode argument specifies that the heap wishes the created allocator
  // to be more aggressive in releasing unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                size_t maximum_size, bool low_memory_mode) = 0;

  virtual void RegisterRecentFree(mirror::Object* ptr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(lock_);

  accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
    return &SweepCallback;
  }

  // Ring buffer of recently freed objects and their classes, used for debugging; it has zero
  // length in non-debug builds (kRecentFreeCount is then 0).
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;
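
  // Editorial sketch (not copied from the implementation): because kRecentFreeCount is a power of
  // two, RegisterRecentFree can advance the ring buffer position with a mask instead of a modulo:
  //
  //   recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
  //   recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;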

  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called. The
  // underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth limit
  // is a value <= the mem_map_ capacity, used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote, the heap will have a maximally sized mem_map_, but the
  // growth_limit_ will be set to a lower value. The growth_limit_ is used as the capacity of the
  // alloc space; however, capacity normally can't vary, whereas the growth_limit_ can be cleared
  // once by a call to ClearGrowthLimit.
  size_t growth_limit_;

  // True if objects in the space are movable.
  bool can_move_objects_;

  // Starting and initial sizes, used when the space is reset.
  const size_t starting_size_;
  const size_t initial_size_;

 private:
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_