/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "base/safe_map.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "mirror/object_reference.h"
#include "offsets.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to avoid
  // dirtying otherwise-clean pages in the immune spaces during concurrent marking.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, std::string indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  space::RegionSpace* region_space_;      // The underlying region space.
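  // Barrier used to synchronize with mutator threads at GC checkpoints (see GetBarrier and
  // IssueEmptyCheckpoint).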
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has been already marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;

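  // Objects that were grayed by a read barrier but turned out not to need it; their lock words
  // are changed back to white (non-gray) by ProcessFalseGrayStack.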
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
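  // Thread-local mark stacks that have been revoked from mutator threads and are waiting to be
  // processed by the GC (see RevokeThreadLocalMarkStacks).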
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
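  // Pool of mark stacks handed out to threads for use as thread-local mark stacks.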
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
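  // The thread that is running this concurrent copying collection.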
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
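  // Mark bitmap for the region space, used for marking objects in unevacuated from-space regions
  // (see MarkUnevacFromSpaceRegion).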
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
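  // Size of the live stack recorded when the stacks are frozen (see RecordLiveStackFreezeSize).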
  size_t live_stack_freeze_size_;
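  // From-space object/byte counts recorded at the first pause, used by the from-space accounting
  // check (kEnableFromSpaceAccountingCheck).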
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
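  // Set when pushing onto the GC mark stack is no longer allowed, to catch unexpected pushes.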
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that hold copies of objects that went unused
  // because of lost races (CAS failures) when installing the object copy/forward pointer.
  // They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
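  // Running totals of the to-space bytes and objects skipped this way.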
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we measure how much time is spent in
  // MarkFromReadBarrier and log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

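  // The heap's read barrier table, used only with the table-lookup read barrier configuration.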
  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
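  // True once the GC has updated the references held by all immune-space objects.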
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // an ObjPtr since the GC thread may transition between suspended and runnable states
  // between phases.
  mirror::Class* java_lang_Object_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_