/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <set>
#include <string>
#include <vector>

#include "atomic_integer.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/collector/gc_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
#include "offsets.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {
namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class SpaceSetMap;
}  // namespace accounting

namespace collector {
  class GarbageCollector;
  class MarkSweep;
}  // namespace collector

namespace space {
  class AllocSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class Space;
  class SpaceTest;
}  // namespace space

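// Card-ageing visitor applied over the card table: a card left dirty by the
// write barrier is aged to (kCardDirty - 1), and any other value is cleared
// back to clean (0).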
class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

// What caused the GC?
enum GcCause {
  // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
  // retrying allocation.
  kGcCauseForAlloc,
  // A background GC trying to ensure there is free memory ahead of allocations.
  kGcCauseBackground,
  // An explicit System.gc() call.
  kGcCauseExplicit,
};
std::ostream& operator<<(std::ostream& os, const GcCause& policy);

// How we want to sanity check the heap's correctness.
enum HeapVerificationMode {
  kHeapVerificationNotPermitted,  // Too early in runtime start-up for heap to be verified.
  kNoHeapVerification,  // Production default.
  kVerifyAllFast,  // Sanity check all heap accesses with quick(er) tests.
  kVerifyAll  // Sanity check all heap accesses.
};
static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;

class Heap {
 public:
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 32 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);

  // Default target utilization.
  static constexpr double kDefaultTargetUtilization = 0.5;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Create a heap with the requested sizes. The possibly empty
  // original_image_file_name names an image Space to load based on
  // ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name, bool concurrent_gc,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold, bool ignore_max_footprint);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void RegisterNativeAllocation(int bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RegisterNativeFree(int bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  void VerifyObjectImpl(const mirror::Object* o);
  void VerifyObject(const mirror::Object* o) {
    if (o != NULL && this != NULL && verify_object_mode_ > kNoHeapVerification) {
      VerifyObjectImpl(o);
    }
  }

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsHeapAddress(const mirror::Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC, should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the
  // type of GC that was waited on.
  collector::GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  mirror::Object* GetReferenceReferent(mirror::Object* reference);
  void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const mirror::Object* ref);
  void EnqueueReference(mirror::Object* ref, mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsEnqueued(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void EnqueuePendingReference(mirror::Object* ref, mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* DequeuePendingReference(mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
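  // The pending-reference operations above thread references into an intrusive
  // list through java.lang.ref.Reference.pendingNext (see
  // GetReferencePendingNextOffset() below). A rough, illustrative use, assuming
  // a caller-owned 'head' pointer that is cleared once the list drains:
  //
  //   mirror::Object* head = NULL;
  //   heap->EnqueuePendingReference(ref, &head);
  //   while (head != NULL) {
  //     mirror::Object* pending = heap->DequeuePendingReference(&head);
  //     // ... process 'pending' ...
  //   }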

  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kDesiredHeapVerification;
    if (verify_object_mode_ > kNoHeapVerification) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kHeapVerificationNotPermitted;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return kDesiredHeapVerification > kNoHeapVerification &&
        verify_object_mode_ > kHeapVerificationNotPermitted;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  void RecordFree(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/, const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }
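  // Illustrative sketch of the expected call pattern (the object names below
  // are hypothetical; real call sites live in the runtime's field and array
  // setters): after writing a non-NULL reference, dirty the holding object's
  // card before the next GC safe-point.
  //
  //   gc::Heap* heap = Runtime::Current()->GetHeap();
  //   heap->WriteBarrierField(dst, member_offset, new_ref);   // single reference field store
  //   heap->WriteBarrierArray(dst_array, 0, copied_count);    // bulk update of an object array
  //
  // Both barriers simply mark dst's card in the card table so a later GC
  // re-scans that object for new references.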

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const;

  // Returns the total number of objects allocated since the heap was created.
  size_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  size_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  int64_t GetMaxMemory() const {
    return growth_limit_;
  }

  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
  // application.
  int64_t GetTotalMemory() const;

  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() const {
    return GetTotalMemory() - num_bytes_allocated_;
  }

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  size_t Trim();

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Mark and empty stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                      accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Update and mark mod union table based on gc type.
  void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                             collector::GcType gc_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Gets called when we get notified by ActivityThread that the process state has changed.
  void ListenForProcessStateChange();

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  space::DlMallocSpace* GetAllocSpace() const {
    return alloc_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  Mutex* GetSoftRefQueueLock() {
    return soft_ref_queue_lock_;
  }

  Mutex* GetWeakRefQueueLock() {
    return weak_ref_queue_lock_;
  }

  Mutex* GetFinalizerRefQueueLock() {
    return finalizer_ref_queue_lock_;
  }

  Mutex* GetPhantomRefQueueLock() {
    return phantom_ref_queue_lock_;
  }

  void DumpSpaces();

  // GC performance measuring
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return care_about_pause_times_;
  }

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }

 private:
  // Allocates uninitialized storage. Passing in a null space tries to place the object in the
  // large object space.
  template <class T> mirror::Object* Allocate(Thread* self, T* space, size_t num_bytes, size_t* bytes_allocated)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t num_bytes,
                                         size_t* bytes_allocated)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs.
  mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow,
                                size_t* bytes_allocated)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. DlMallocSpace-specialized version.
  mirror::Object* TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size, bool grow,
                                size_t* bytes_allocated)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
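  // Rough shape of how these fit together (illustrative only; the actual
  // retry and footprint-growth policy lives in the .cc file):
  //
  //   obj = TryToAllocate(self, space, alloc_size, false, &bytes_allocated);
  //   if (obj == NULL) {
  //     // Run increasingly aggressive GCs, retrying the allocation after each,
  //     // and only then allow the footprint to grow toward the growth limit.
  //     obj = AllocateInternalWithGc(self, space, alloc_size, &bytes_allocated);
  //   }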

  bool IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow);

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(mirror::Object** cleared_references);

  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;

  void RecordAllocation(size_t size, mirror::Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
  // which type of Gc was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio.  This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
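  // A sketch of the sizing rule this implies, assuming it follows the
  // utilization fields declared below (the exact clamping lives in the .cc
  // file):
  //
  //   target_size = bytes_allocated / target_utilization_;   // e.g. 8MB / 0.5 -> 16MB
  //   target_size = Clamp(target_size,
  //                       bytes_allocated + min_free_,
  //                       bytes_allocated + max_free_);
  //   SetIdealFootprint(target_size);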

  size_t GetPercentFree();

  void AddContinuousSpace(space::ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void AddDiscontinuousSpace(space::DiscontinuousSpace* space)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks();

  // Clear cards and update the mod union table.
  void ProcessCards(base::TimingLogger& timings);

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;

  // The allocation space we are currently allocating into.
  space::DlMallocSpace* alloc_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  UniquePtr<accounting::CardTable> card_table_;

  // The mod-union table remembers all of the references from the image space to the alloc /
  // zygote spaces to allow the card table to be cleared.
  UniquePtr<accounting::ModUnionTable> image_mod_union_table_;

  // This table holds all of the references from the zygote space to the alloc space.
  UniquePtr<accounting::ModUnionTable> zygote_mod_union_table_;

  // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
  // false for stop-the-world mark sweep.
  const bool concurrent_gc_;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Guards access to the state of GC, associated conditional variable is used to signal when a GC
  // completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Mutexes held when adding references to reference queues.
  // TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
  Mutex* soft_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex* weak_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex* finalizer_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex* phantom_ref_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  const size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
  // a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // The watermark at which a GC is performed inside of registerNativeAllocation.
  size_t native_footprint_limit_;

  // Activity manager members.
  jclass activity_thread_class_;
  jclass application_thread_class_;
  jobject activity_thread_;
  jobject application_thread_;
  jfieldID last_process_state_id_;

  // Process states which care about pause times.
  std::set<int> process_state_cares_about_pause_time_;

  // Whether or not we currently care about pause times.
  bool care_about_pause_times_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;
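  // (A concurrent collection is typically requested from the allocation path,
  // via RequestConcurrentGC(), once this threshold is crossed; see the .cc
  // file for the exact check.)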

  // Since the heap was created, how many bytes have been freed.
  size_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  size_t total_objects_freed_ever_;

  // Primitive objects larger than this size are put in the large object space.
  const size_t large_object_threshold_;

  // Number of bytes allocated.  Adjusted after each allocation and free.
  AtomicInteger num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  AtomicInteger native_bytes_allocated_;

  // Data structure GC overhead.
  AtomicInteger gc_memory_overhead_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // Parallel GC data structures.
  UniquePtr<ThreadPool> thread_pool_;

  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace
  // then it's probably better to just do a partial GC.
  const size_t min_alloc_space_size_for_sticky_gc_;

  // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
  // normal GC, it is important to not use it when we are almost out of memory.
  const size_t min_remaining_space_for_sticky_gc_;

  // The last time a heap trim occurred.
  uint64_t last_trim_time_ms_;

  // The nanosecond time at which the last GC ended.
  uint64_t last_gc_time_ns_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps that are set corresponding to the live and marked objects in each space.
  UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  bool is_allocation_stack_sorted_;
  UniquePtr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocation with the heap unlocked.
  UniquePtr<accounting::ObjectStack> live_stack_;

  // offset of java.lang.ref.Reference.referent
  MemberOffset reference_referent_offset_;

  // offset of java.lang.ref.Reference.queue
  MemberOffset reference_queue_offset_;

  // offset of java.lang.ref.Reference.queueNext
  MemberOffset reference_queueNext_offset_;

  // offset of java.lang.ref.Reference.pendingNext
  MemberOffset reference_pendingNext_offset_;

  // offset of java.lang.ref.FinalizerReference.zombie
  MemberOffset finalizer_reference_zombie_offset_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio
  double target_utilization_;

  // Total time during which mutators are paused or waiting for a GC to complete.
  uint64_t total_wait_time_;

  // Cumulative time spent allocating objects (see kTimeAdjust above for the unit scaling).
  AtomicInteger total_allocation_time_;

  // The current state of heap verification, may be enabled or disabled.
  HeapVerificationMode verify_object_mode_;

  std::vector<collector::MarkSweep*> mark_sweep_collectors_;

  const bool running_on_valgrind_;

  friend class collector::MarkSweep;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  friend class space::SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_