/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/logging.h>

#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "base/atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/runtime_debug.h"
#include "base/safe_map.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
#include "gc/collector/iteration.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "handle.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "read_barrier_config.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class IsMarkedVisitor;
class Mutex;
class RootVisitor;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocationListener;
class AllocRecordObjectMap;
class GcPauseListener;
class ReferenceProcessor;
class TaskProcessor;
class Verification;

namespace accounting {
template <typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
class CardTable;
class HeapBitmap;
class ModUnionTable;
class ReadBarrierTable;
class RememberedSet;
}  // namespace accounting

namespace collector {
class ConcurrentCopying;
class GarbageCollector;
class MarkCompact;
class MarkSweep;
class SemiSpace;
}  // namespace collector

namespace allocator {
class RosAlloc;
}  // namespace allocator

namespace space {
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class MallocSpace;
class RegionSpace;
class RosAllocSpace;
class Space;
class ZygoteSpace;
}  // namespace space

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  // Default heap size and tuning constants.
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
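  // Worked example: assuming the common 4 KiB page size, kMinLargeObjectThreshold is
  // 3 * 4 KiB = 12 KiB, so a primitive array of 12 KiB or more bypasses the regular
  // alloc spaces and is allocated in the large object space instead.
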
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;
  static uint8_t* const kPreferredAllocSpaceBegin;

  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
  // Whether the transition-wait applies or not. Zero wait will stress the
  // transition code and collector, but increases jank probability.
  DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);

  // Create a heap with the requested sizes. The possibly empty image_file_names
  // specify Spaces to load based on ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::string& original_image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_max_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              ObjPtr<mirror::Class> klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self,
                                                         klass,
                                                         num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

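  // Illustrative call sketch (not part of the API surface; assumes a valid Thread* and a
  // resolved, initialized klass):
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       self, klass, byte_count, VoidFunctor());  // VoidFunctor: a no-op visitor
  // The pre-fence visitor runs on the new object before the allocation fence, e.g. to
  // set an array length before the object becomes visible to other threads.
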
  // Allocates and initializes storage for an object instance in the non-moving space.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self,
                                                         klass,
                                                         num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         ObjPtr<mirror::Class> klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_);

  // Returns the allocator type currently used for movable objects.
  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  // Returns the allocator type currently used for non-movable objects.
  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

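  // Illustrative visitor sketch (assumes the mutator lock is held as required above):
  //   heap->VisitObjects([](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
  //     // Inspect obj here; the visitor must not allocate or cause a GC.
  //   });
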
  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

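  // Illustrative sketch: JNI code backing dalvik.system.VMRuntime.registerNativeAllocation
  // reports native buffers tied to Java objects so they contribute GC pressure:
  //   heap->RegisterNativeAllocation(env, buffer_size);  // when the buffer is attached
  //   heap->RegisterNativeFree(env, buffer_size);        // when the buffer is released
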
  // Change the allocator; updates the entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime; may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);

  // The given reference is believed to be to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enables us to prevent a moving (compacting) GC until the objects are released.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

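  // Illustrative pairing sketch: JNI critical regions (e.g. GetPrimitiveArrayCritical /
  // ReleasePrimitiveArrayCritical) bracket raw data access so the concurrent copying
  // collector cannot flip threads while the pointer is exposed:
  //   heap->IncrementDisableThreadFlip(self);
  //   ... access raw array memory ...
  //   heap->DecrementDisableThreadFlip(self);
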
  // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

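  // Illustrative sketch, mirroring what java.lang.Runtime.gc() ultimately triggers:
  //   heap->CollectGarbage(/*clear_soft_references=*/false);
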
  // Does a concurrent GC; should only be called by the GC daemon thread
  // through the runtime.
  void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
  void GetInstances(VariableSizedHandleScope& scope,
                    Handle<mirror::Class> c,
                    bool use_is_assignable_from,
                    int32_t max_count,
                    std::vector<Handle<mirror::Object>>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(VariableSizedHandleScope& scope,
                           Handle<mirror::Object> o,
                           int32_t max_count,
                           std::vector<Handle<mirror::Object>>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity and unmap pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

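  // Worked example: with the default target utilization of 0.5 and 20 MB live after a
  // full GC, the ideal footprint would be 20 MB / 0.5 = 40 MB; GrowForUtilization then
  // clamps the growth to within [min_free_, max_free_] above the live size (scaled by
  // the foreground heap growth multiplier).
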
  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
    REQUIRES(!Locks::heap_bitmap_lock_)
    REQUIRES(Locks::mutator_lock_);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value; may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock since vector empty is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if null is stored in the field.
  ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
                                       MemberOffset offset,
                                       ObjPtr<mirror::Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Write barrier for array operations that update many field positions.
  ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                       int start_offset,
                                       // TODO: element_count or byte_count?
                                       size_t length)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

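  // Illustrative sketch: any raw reference store into dst must be paired with a card
  // dirtying call so a concurrent/generational collector rescans dst (in practice the
  // mirror::Object reference setters do this automatically):
  //   StoreReferenceRaw(dst, offset, new_value);        // hypothetical raw store
  //   heap->WriteBarrierField(dst, offset, new_value);  // dirty dst's card
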
  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t bytes_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, bytes_allocated);
  }

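  // Worked example of how the java.lang.Runtime queries relate, for a 64 MB footprint
  // with 24 MB allocated: freeMemory() = 64 - 24 = 40 MB, totalMemory() = 64 MB, and
  // maxMemory() stays at growth_limit_, the ceiling beyond which an OOME is thrown.
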
  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                              bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromAddress(const void* ptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark the objects in the allocation stack and empty it.
  void FlushAllocStack()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void GetBootImagesSize(uint32_t* boot_image_begin,
                         uint32_t* boot_image_end,
                         uint32_t* boot_oat_begin,
                         uint32_t* boot_oat_end);

  // Permanently disable moving garbage collection.
  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) here yet.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
  std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    return concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request asynchronous GC.
  void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support.
  // Callers to this function use double-checked locking to ensure safety on allocation_records_.
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.LoadRelaxed();
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.StoreRelaxed(enabled);
  }

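  // Illustrative double-checked locking sketch for readers of allocation_records_ (see
  // the comment on IsAllocTrackingEnabled()):
  //   if (heap->IsAllocTrackingEnabled()) {                // cheap relaxed check
  //     MutexLock mu(self, *Locks::alloc_tracker_lock_);
  //     if (heap->IsAllocTrackingEnabled()) {              // re-check under the lock
  //       AllocRecordObjectMap* records = heap->GetAllocationRecords();
  //       // ... use records ...
  //     }
  //   }
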
  AllocRecordObjectMap* GetAllocationRecords() const
      REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact the default alloc space into it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

  // Install an allocation listener.
  void SetAllocationListener(AllocationListener* l);
  // Remove an allocation listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveAllocationListener();

  // Install a gc pause listener.
  void SetGcPauseListener(GcPauseListener* l);
  // Get the currently installed gc pause listener, or null.
  GcPauseListener* GetGcPauseListener() {
    return gc_pause_listener_.LoadAcquire();
  }
  // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveGcPauseListener();

  const Verification* GetVerification() const;

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                              size_t capacity, std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction.
    return main_space_backup_ != nullptr;
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion &&
        allocator_type != kAllocatorTypeRegionTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    if (kUseReadBarrier) {
      // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
      return true;
    }
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeCCBackground ||
        collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
                                       size_t new_num_bytes_allocated,
                                       ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self,
                                   ObjPtr<mirror::Class>* klass,
                                   size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self,
                                         AllocatorType allocator,
                                         bool instrumented,
                                         size_t num_bytes,
                                         size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated,
                                         ObjPtr<mirror::Class>* klass)
      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               ObjPtr<mirror::Class> c,
                               size_t bytes)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* AllocWithNewTLAB(Thread* self,
                                   size_t alloc_size,
                                   bool grow,
                                   size_t* bytes_allocated,
                                   size_t* usable_size,
                                   size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                               size_t alloc_size,
                                               bool grow);

  // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
  void RunFinalization(JNIEnv* env, uint64_t timeout);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during
  // which the GC ran.
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          uint64_t bytes_allocated_before_gc = 0);

  size_t GetPercentFree();

  // Swap the allocation stack with the live stack.
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear cards and update the mod union table. The alloc space is processed only when
  // process_alloc_space_cards is true; in that case its cards are cleared if
  // clear_alloc_space_cards is true and aged otherwise.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  void ClearConcurrentGCRequest();
  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
  // sweep GC, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS ||
        collector_type_ == kCollectorTypeCC ||
        collector_type_ == kCollectorTypeCCBackground;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim 0 pages at the end of reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);

  // GC stress mode attempts to do one GC per unique backtrace.
  void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  collector::GcType NonStickyGcType() const {
    return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
  }

  // How large new_native_bytes_allocated_ can grow before we trigger a new
  // GC.
  ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
    // Reuse max_free_ for the native allocation gc watermark, so that the
    // native heap is treated in the same way as the Java heap in the case
    // where the gc watermark update would exceed max_free_. Using max_free_
    // instead of the target utilization means the watermark doesn't depend on
    // the current number of registered native allocations.
    return max_free_;
  }

  void TraceHeapSize(size_t heap_size);

  // Remove a vlog code from heap-inl.h which is transitively included in half the world.
  static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);

   1096   // All-known continuous spaces, where objects lie within fixed bounds.
   1097   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
   1098 
   1099   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
   1100   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
   1101 
   1102   // All-known alloc spaces, where objects may be or have been allocated.
   1103   std::vector<space::AllocSpace*> alloc_spaces_;
   1104 
   1105   // A space where non-movable objects are allocated, when compaction is enabled it contains
   1106   // Classes, ArtMethods, ArtFields, and non moving objects.
   1107   space::MallocSpace* non_moving_space_;
   1108 
   1109   // Space which we use for the kAllocatorTypeROSAlloc.
   1110   space::RosAllocSpace* rosalloc_space_;
   1111 
   1112   // Space which we use for the kAllocatorTypeDlMalloc.
   1113   space::DlMallocSpace* dlmalloc_space_;
   1114 
   1115   // The main space is the space which the GC copies to and from on process state updates. This
   1116   // space is typically either the dlmalloc_space_ or the rosalloc_space_.
   1117   space::MallocSpace* main_space_;
   1118 
   1119   // The large object space we are currently allocating into.
   1120   space::LargeObjectSpace* large_object_space_;
   1121 
   1122   // The card table, dirtied by the write barrier.
   1123   std::unique_ptr<accounting::CardTable> card_table_;
   1124 
   1125   std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
   1126 
   1127   // A mod-union table remembers all of the references from the it's space to other spaces.
   1128   AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
   1129       mod_union_tables_;
   1130 
   1131   // A remembered set remembers all of the references from the it's space to the target space.
   1132   AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
   1133       remembered_sets_;
   1134 
   1135   // The current collector type.
   1136   CollectorType collector_type_;
   1137   // Which collector we use when the app is in the foreground.
   1138   CollectorType foreground_collector_type_;
   1139   // Which collector we will use when the app is notified of a transition to background.
   1140   CollectorType background_collector_type_;
   1141   // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
   1142   CollectorType desired_collector_type_;
   1143 
   1144   // Lock which guards pending tasks.
   1145   Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   1146 
   1147   // How many GC threads we may use for paused parts of garbage collection.
   1148   const size_t parallel_gc_threads_;
   1149 
   1150   // How many GC threads we may use for unpaused parts of garbage collection.
   1151   const size_t conc_gc_threads_;
   1152 
   1153   // Whether we are in low memory mode.
   1154   const bool low_memory_mode_;
   1155 
   1156   // If we get a pause longer than the long pause log threshold, then we print out the GC
   1157   // after it finishes.
   1158   const size_t long_pause_log_threshold_;
   1159 
   1160   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
   1161   const size_t long_gc_log_threshold_;
   1162 
   1163   // If we ignore the max footprint, the heap may grow until it hits the heap capacity. This
   1164   // is useful for benchmarking since it reduces time spent in GC to a low percentage.
   1165   const bool ignore_max_footprint_;
   1166 
   1167   // Lock which guards zygote space creation.
   1168   Mutex zygote_creation_lock_;
   1169 
   1170   // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
   1171   // zygote space creation.
   1172   space::ZygoteSpace* zygote_space_;
   1173 
   1174   // Minimum allocation size for a large object.
   1175   size_t large_object_threshold_;
   1176 
   1177   // Guards access to the state of GC; the associated condition variable is used to signal
   1178   // when a GC completes.
   1179   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   1180   std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
   1181 
   1182   // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
   1183   Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   1184   std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
   1185   // This counter keeps track of how many threads are currently in a JNI critical section. This is
   1186   // incremented once per thread even with nested enters.
   1187   size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
   1188   bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
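          // Hedged sketch (not in the original source): entering a JNI critical section
          // under this scheme would wait out any running thread flip before bumping the
          // counter, roughly:
          //
          //   MutexLock mu(self, *thread_flip_lock_);
          //   while (thread_flip_running_) {
          //     thread_flip_cond_->Wait(self);  // Lock is released and re-acquired here.
          //   }
          //   ++disable_thread_flip_count_;
          //
          // The flip itself would symmetrically wait for disable_thread_flip_count_ == 0.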
   1189 
   1190   // Reference processor.
   1191   std::unique_ptr<ReferenceProcessor> reference_processor_;
   1192 
   1193   // Task processor; proxies heap trim requests to the daemon threads.
   1194   std::unique_ptr<TaskProcessor> task_processor_;
   1195 
   1196   // Collector type of the running GC.
   1197   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
   1198 
   1199   // Cause of the last running GC.
   1200   volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
   1201 
   1202   // The thread currently running the GC.
   1203   volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
   1204 
   1205   // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
   1206   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
   1207   collector::GcType next_gc_type_;
   1208 
   1209   // Maximum size that the heap can reach.
   1210   size_t capacity_;
   1211 
   1212   // The size the heap is limited to. This is initially smaller than capacity, but for
   1213   // largeHeap programs it is "cleared", making it the same as capacity.
   1214   size_t growth_limit_;
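          // Per the comment above, "clearing" the growth limit for largeHeap programs is
          // essentially (illustrative only):
          //
          //   growth_limit_ = capacity_;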
   1215 
   1216   // When the number of bytes allocated exceeds the footprint, TryAllocate returns null,
   1217   // indicating that a GC should be triggered.
   1218   size_t max_allowed_footprint_;
   1219 
   1220   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   1221   // it completes ahead of an allocation failing.
   1222   size_t concurrent_start_bytes_;
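          // Hedged sketch (not in the original source): an allocation slow path might check
          // this threshold and request a concurrent collection, assuming a
          // RequestConcurrentGC helper and the kGcCauseBackground cause:
          //
          //   if (num_bytes_allocated_.load(std::memory_order_relaxed) >=
          //       concurrent_start_bytes_) {
          //     RequestConcurrentGC(self, kGcCauseBackground, /*force_full=*/false);
          //   }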
   1223 
   1224   // Since the heap was created, how many bytes have been freed.
   1225   uint64_t total_bytes_freed_ever_;
   1226 
   1227   // Since the heap was created, how many objects have been freed.
   1228   uint64_t total_objects_freed_ever_;
   1229 
   1230   // Number of bytes allocated.  Adjusted after each allocation and free.
   1231   Atomic<size_t> num_bytes_allocated_;
   1232 
   1233   // Number of registered native bytes allocated since the last time GC was
   1234   // triggered. Adjusted after each RegisterNativeAllocation and
   1235   // RegisterNativeFree. Used to determine when to trigger GC for native
   1236   // allocations.
   1237   // See the REDESIGN section of go/understanding-register-native-allocation.
   1238   Atomic<size_t> new_native_bytes_allocated_;
   1239 
   1240   // Number of registered native bytes allocated prior to the last time GC was
   1241   // triggered, for debugging purposes. The current number of registered
   1242   // native bytes is determined by taking the sum of
   1243   // old_native_bytes_allocated_ and new_native_bytes_allocated_.
   1244   Atomic<size_t> old_native_bytes_allocated_;
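          // As described above, the current number of registered native bytes is the sum of
          // the two counters (illustrative only):
          //
          //   size_t current_native_bytes =
          //       old_native_bytes_allocated_.load(std::memory_order_relaxed) +
          //       new_native_bytes_allocated_.load(std::memory_order_relaxed);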
   1245 
   1246   // Number of bytes freed by thread local buffer revokes. This will
   1247   // cancel out the ahead-of-time bulk counting of bytes allocated in
   1248   // rosalloc thread-local buffers.  It is temporarily accumulated
   1249   // here to be subtracted from num_bytes_allocated_ later at the next
   1250   // GC.
   1251   Atomic<size_t> num_bytes_freed_revoke_;
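          // Hedged sketch (not in the original source): at the next GC, the accumulated
          // revoked bytes would be drained and netted out of num_bytes_allocated_:
          //
          //   size_t freed = num_bytes_freed_revoke_.exchange(0, std::memory_order_relaxed);
          //   num_bytes_allocated_.fetch_sub(freed, std::memory_order_relaxed);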
   1252 
   1253   // Info related to the current or previous GC iteration.
   1254   collector::Iteration current_gc_iteration_;
   1255 
   1256   // Heap verification flags.
   1257   const bool verify_missing_card_marks_;
   1258   const bool verify_system_weaks_;
   1259   const bool verify_pre_gc_heap_;
   1260   const bool verify_pre_sweeping_heap_;
   1261   const bool verify_post_gc_heap_;
   1262   const bool verify_mod_union_table_;
   1263   bool verify_pre_gc_rosalloc_;
   1264   bool verify_pre_sweeping_rosalloc_;
   1265   bool verify_post_gc_rosalloc_;
   1266   const bool gc_stress_mode_;
   1267 
   1268   // RAII that temporarily disables the rosalloc verification during
   1269   // the zygote fork.
   1270   class ScopedDisableRosAllocVerification {
   1271    private:
   1272     Heap* const heap_;
   1273     const bool orig_verify_pre_gc_;
   1274     const bool orig_verify_pre_sweeping_;
   1275     const bool orig_verify_post_gc_;
   1276 
   1277    public:
   1278     explicit ScopedDisableRosAllocVerification(Heap* heap)
   1279         : heap_(heap),
   1280           orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
   1281           orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
   1282           orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
   1283       heap_->verify_pre_gc_rosalloc_ = false;
   1284       heap_->verify_pre_sweeping_rosalloc_ = false;
   1285       heap_->verify_post_gc_rosalloc_ = false;
   1286     }
   1287     ~ScopedDisableRosAllocVerification() {
   1288       heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
   1289       heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
   1290       heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
   1291     }
   1292   };
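          // Typical usage during the zygote fork, following the RAII semantics above (the
          // body of the scope is hypothetical):
          //
          //   {
          //     ScopedDisableRosAllocVerification disable_verification(this);
          //     // ... create the zygote space with rosalloc verification off ...
          //   }  // Destructor restores the three verification flags.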
   1293 
   1294   // Parallel GC data structures.
   1295   std::unique_ptr<ThreadPool> thread_pool_;
   1296 
   1297   // A bitmap with a bit set for each object known to be live since the last GC cycle.
   1298   std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
   1299   // A bitmap with a bit set for each object marked in the current GC cycle.
   1300   std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
   1301 
   1302   // Mark stack that we reuse across GCs to avoid re-allocating it.
   1303   std::unique_ptr<accounting::ObjectStack> mark_stack_;
   1304 
   1305   // Allocation stack; new allocations go here so that we can do sticky mark bits. This
   1306   // enables us to use the live bitmap as the old mark bitmap.
   1307   const size_t max_allocation_stack_size_;
   1308   std::unique_ptr<accounting::ObjectStack> allocation_stack_;
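          // Hedged sketch (not in the original source): on the allocation path, each new
          // object would be pushed onto this stack, assuming AtomicStack::AtomicPushBack
          // returns false when the stack is full:
          //
          //   if (!allocation_stack_->AtomicPushBack(obj)) {
          //     // Stack is full: flush it (e.g. into the live stack/bitmap), then retry.
          //   }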
   1309 
   1310   // Second allocation stack so that we can process allocations with the heap unlocked.
   1311   std::unique_ptr<accounting::ObjectStack> live_stack_;
   1312 
   1313   // Allocator type.
   1314   AllocatorType current_allocator_;
   1315   const AllocatorType current_non_moving_allocator_;
   1316 
   1317   // Which GCs we run in order when an allocation fails.
   1318   std::vector<collector::GcType> gc_plan_;
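          // Hedged sketch (not in the original source): on allocation failure the plan is
          // walked from the cheapest to the most expensive GC type until an attempt frees
          // enough memory, assuming a CollectGarbageInternal helper:
          //
          //   for (collector::GcType gc_type : gc_plan_) {
          //     CollectGarbageInternal(gc_type, kGcCauseForAlloc, /*clear_soft_refs=*/false);
          //     // Retry the allocation; fall through to a heavier GC type on failure.
          //   }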
   1319 
   1320   // Bump pointer spaces.
   1321   space::BumpPointerSpace* bump_pointer_space_;
   1322   // Temp space is the space which the semispace collector copies to.
   1323   space::BumpPointerSpace* temp_space_;
   1324 
   1325   // Region space, used by the concurrent collector.
   1326   space::RegionSpace* region_space_;
   1327 
   1328   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
   1329   // utilization, regardless of target utilization ratio.
   1330   size_t min_free_;
   1331 
   1332   // The ideal maximum free size, when we grow the heap for utilization.
   1333   size_t max_free_;
   1334 
   1335   // Target ideal heap utilization ratio.
   1336   double target_utilization_;
   1337 
   1338   // How much more we grow the heap when the app is in the foreground instead of the background.
   1339   double foreground_heap_growth_multiplier_;
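          // Hedged sketch (not in the original source): growing the heap for utilization
          // under these knobs amounts to dividing the current bytes_allocated by the target
          // ratio and clamping to the [min_free_, max_free_] band, roughly:
          //
          //   size_t target = static_cast<size_t>(bytes_allocated / target_utilization_);
          //   target = std::min(target, bytes_allocated + max_free_);
          //   target = std::max(target, bytes_allocated + min_free_);
          //   // A foreground app would additionally scale the free headroom by
          //   // foreground_heap_growth_multiplier_.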
   1340 
   1341   // Total time for which mutators are paused or waiting for GC to complete.
   1342   uint64_t total_wait_time_;
   1343 
   1344   // The current state of heap verification; it may be enabled or disabled.
   1345   VerifyObjectMode verify_object_mode_;
   1346 
   1347   // Compacting GC disable count; prevents compacting GC from running iff > 0.
   1348   size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
   1349 
   1350   std::vector<collector::GarbageCollector*> garbage_collectors_;
   1351   collector::SemiSpace* semi_space_collector_;
   1352   collector::MarkCompact* mark_compact_collector_;
   1353   collector::ConcurrentCopying* concurrent_copying_collector_;
   1354 
   1355   const bool is_running_on_memory_tool_;
   1356   const bool use_tlab_;
   1357 
   1358   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
   1359   // Use unique_ptr since the space is only added during the homogeneous compaction phase.
   1360   std::unique_ptr<space::MallocSpace> main_space_backup_;
   1361 
   1362   // Minimum interval allowed between two homogeneous space compactions caused by OOM.
   1363   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
   1364 
   1365   // Time of the last homogeneous space compaction caused by OOM.
   1366   uint64_t last_time_homogeneous_space_compaction_by_oom_;
   1367 
   1368   // Number of OOMs avoided by homogeneous space compaction.
   1369   Atomic<size_t> count_delayed_oom_;
   1370 
   1371   // Count for requested homogeneous space compaction.
   1372   Atomic<size_t> count_requested_homogeneous_space_compaction_;
   1373 
   1374   // Count for ignored homogeneous space compaction.
   1375   Atomic<size_t> count_ignored_homogeneous_space_compaction_;
   1376 
   1377   // Count for performed homogeneous space compaction.
   1378   Atomic<size_t> count_performed_homogeneous_space_compaction_;
   1379 
   1380   // Whether or not a concurrent GC is pending.
   1381   Atomic<bool> concurrent_gc_pending_;
   1382 
   1383   // Active tasks which we can modify (change target time, desired collector type, etc.).
   1384   CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
   1385   HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
   1386 
   1387   // Whether or not we use homogeneous space compaction to avoid OOM errors.
   1388   bool use_homogeneous_space_compaction_for_oom_;
   1389 
   1390   // True if the currently running collection has made some thread wait.
   1391   bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
   1392   // The number of blocking GC runs.
   1393   uint64_t blocking_gc_count_;
   1394   // The total duration of blocking GC runs.
   1395   uint64_t blocking_gc_time_;
   1396   // The duration of the window for the GC count rate histograms.
   1397   static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
   1398   // The last time the GC count rate histograms were updated.
   1399   // This is rounded down to a multiple of kGcCountRateHistogramWindowDuration (i.e. of 10s).
   1400   uint64_t last_update_time_gc_count_rate_histograms_;
   1401   // The running count of GC runs in the last window.
   1402   uint64_t gc_count_last_window_;
   1403   // The running count of blocking GC runs in the last window.
   1404   uint64_t blocking_gc_count_last_window_;
   1405   // The maximum number of buckets in the GC count rate histograms.
   1406   static constexpr size_t kGcCountRateMaxBucketCount = 200;
   1407   // The histogram of the number of GC invocations per window duration.
   1408   Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
   1409   // The histogram of the number of blocking GC invocations per window duration.
   1410   Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
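          // Hedged sketch (not in the original source): updating the windowed histograms
          // amounts to rounding "now" down to a window boundary and flushing the running
          // counts once the window has passed, assuming Histogram exposes AddValue:
          //
          //   uint64_t now = NanoTime();
          //   uint64_t cur_window = now - (now % kGcCountRateHistogramWindowDuration);
          //   if (cur_window > last_update_time_gc_count_rate_histograms_) {
          //     gc_count_rate_histogram_.AddValue(gc_count_last_window_);
          //     blocking_gc_count_rate_histogram_.AddValue(blocking_gc_count_last_window_);
          //     gc_count_last_window_ = blocking_gc_count_last_window_ = 0;
          //     last_update_time_gc_count_rate_histograms_ = cur_window;
          //   }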
   1411 
   1412   // Allocation tracking support
   1413   Atomic<bool> alloc_tracking_enabled_;
   1414   std::unique_ptr<AllocRecordObjectMap> allocation_records_;
   1415 
   1416   // GC stress related data structures.
   1417   Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   1418   // Debugging counters: seen backtraces vs. unique backtraces.
   1419   Atomic<uint64_t> seen_backtrace_count_;
   1420   Atomic<uint64_t> unique_backtrace_count_;
   1421   // Stack trace hashes that we have already seen.
   1422   std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
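          // Hedged sketch (not in the original source): in GC stress mode each allocation
          // backtrace hash would be deduplicated against this set under backtrace_lock_,
          // assuming Atomic<> forwards std::atomic's fetch_add:
          //
          //   MutexLock mu(self, *backtrace_lock_);
          //   if (seen_backtraces_.insert(hash).second) {
          //     unique_backtrace_count_.fetch_add(1, std::memory_order_relaxed);
          //   } else {
          //     seen_backtrace_count_.fetch_add(1, std::memory_order_relaxed);
          //   }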
   1423 
   1424   // We disable GC when we are shutting down the runtime in case there are daemon threads still
   1425   // allocating.
   1426   bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
   1427 
   1428   // Boot image spaces.
   1429   std::vector<space::ImageSpace*> boot_image_spaces_;
   1430 
   1431   // An installed allocation listener.
   1432   Atomic<AllocationListener*> alloc_listener_;
   1433   // An installed GC Pause listener.
   1434   Atomic<GcPauseListener*> gc_pause_listener_;
   1435 
   1436   std::unique_ptr<Verification> verification_;
   1437 
   1438   friend class CollectorTransitionTask;
   1439   friend class collector::GarbageCollector;
   1440   friend class collector::MarkCompact;
   1441   friend class collector::ConcurrentCopying;
   1442   friend class collector::MarkSweep;
   1443   friend class collector::SemiSpace;
   1444   friend class ReferenceQueue;
   1445   friend class ScopedGCCriticalSection;
   1446   friend class VerifyReferenceCardVisitor;
   1447   friend class VerifyReferenceVisitor;
   1448   friend class VerifyObjectVisitor;
   1449 
   1450   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
   1451 };
   1452 
   1453 }  // namespace gc
   1454 }  // namespace art
   1455 
   1456 #endif  // ART_RUNTIME_GC_HEAP_H_
   1457