/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "garbage_collector.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <vector>

namespace art {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
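  // Illustrative only (an assumption, not part of this header's contract): a
  // read-barrier slow path would typically call Mark() roughly like
  //
  //   mirror::Object* to_ref = Mark(self, from_ref, /*holder=*/ obj, offset);
  //
  // where `obj`/`offset` identify the field holding `from_ref`; the returned
  // reference satisfies the to-space invariant.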
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object `obj` in the dirty cards during
  // card-table scan. In addition to visiting the references, it also sets the
  // read-barrier state to gray for Reference-type objects to ensure that
  // GetReferent() called on these objects calls the read-barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // If true, run a generational "sticky" collection, i.e. only trace through
  // dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
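  // Illustrative sketch of the intended push pattern (an assumption; the
  // authoritative code is in ConcurrentCopying::MarkFromReadBarrier):
  //
  //   if (!rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // Stack was full; remember the hint.
  //   }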

  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
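  // Illustrative dispatch sketch (an assumption; see PushOntoMarkStack for the
  // actual logic):
  //
  //   MarkStackMode mode = mark_stack_mode_.load(std::memory_order_relaxed);
  //   if (mode == kMarkStackModeThreadLocal) {
  //     // Push onto the calling thread's thread-local mark stack.
  //   } else if (mode == kMarkStackModeShared) {
  //     MutexLock mu(self, mark_stack_lock_);
  //     // Push onto gc_mark_stack_ under the lock.
  //   }  // kMarkStackModeGcExclusive: only the GC thread touches gc_mark_stack_.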
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators, so the GC thread uses separate non-atomic counters to avoid
  // CAS operations. bytes_moved_ and bytes_moved_gc_thread_ are critical for
  // GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;  // Used by mutators.
  Atomic<size_t> objects_moved_;  // Used by mutators.
  size_t bytes_moved_gc_thread_;  // Used by the GC thread.
  size_t objects_moved_gc_thread_;  // Used by the GC thread.
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;
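  // For reference (an assumption about how the two counters are combined; the
  // authoritative use is in ReclaimPhase), the total bytes moved in a cycle is
  // roughly:
  //
  //   size_t total_bytes_moved = bytes_moved_.load(std::memory_order_relaxed) +
  //                              bytes_moved_gc_thread_;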

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning
  // applies to gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs counted, used to calculate the average ratio above. (It
  // doesn't include GCs where from_bytes is zero, i.e. where from-space is
  // empty, which is possible for a minor GC if all allocated objects are in
  // non-moving space.)
  size_t gc_count_;
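  // Illustration only: the average copied live bytes ratio derived from these
  // two fields is effectively
  //
  //   copied_live_bytes_ratio_sum_ / gc_count_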
  // A bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of the two-phase full-heap GC cycle.
  std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes / num_allocated_bytes per GC cycle.
  float reclaimed_bytes_ratio_sum_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) when copying an
  // object or installing its forward pointer. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be used
  // directly without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
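  // Illustrative reuse sketch (an assumption; AllocateInSkippedBlock holds the
  // actual logic): find the smallest recorded block that fits the request, e.g.
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it != skipped_blocks_map_.end()) {
  //     uint8_t* addr = it->second;    // Start of a previously skipped block.
  //     size_t byte_size = it->first;  // Its size; any excess beyond alloc_size
  //                                    // must be refilled with a dummy object.
  //     skipped_blocks_map_.erase(it);
  //   }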
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we measure the time spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillDummyObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_