/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include <memory>

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "gc_root.h"
#include "gc/accounting/heap_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

namespace art {

namespace mirror {
class Class;
class Object;
class Reference;
}  // namespace mirror

class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

class Heap;

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
}  // namespace accounting

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
  void InitializePhase();
  void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ReclaimPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void FinishPhase();
  virtual void MarkReachableObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

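  // Rough shape of one collection cycle as driven by RunPhases(). This is a
  // sketch for orientation only (mark_sweep.cc is authoritative); in the
  // concurrent case marking runs with mutators mostly unpaused, and
  // PausePhase() is a short stop-the-world window:
  //
  //   InitializePhase();
  //   MarkingPhase();    // Mark roots and recursively mark reachable objects.
  //   PausePhase();      // Briefly stop the world to re-mark roots and dirty cards.
  //   ReclaimPhase();    // Process references and sweep unmarked objects.
  //   FinishPhase();
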
  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultSpaceBitmap() REQUIRES_SHARED(Locks::mutator_lock_);

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  virtual void MarkConcurrentRoots(VisitRootFlags flags)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

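  // Background note on the dirty-card scan above (a sketch, not a contract): mutator writes
  // dirty entries in the card table, and RecursiveMarkDirtyObjects() rescans objects on cards
  // whose value is at least minimum_age; paused indicates whether the scan runs inside a
  // stop-the-world pause or concurrently with mutators.
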
  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update and mark references from immune spaces.
  void UpdateAndMarkModUnion()
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pre-clean cards to reduce how much work is needed in the pause.
  void PreCleanCards()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual because by default it
  // sweeps all allocation spaces; partial and sticky GCs only want to sweep a subset of the heap.
  virtual void Sweep(bool swap_bitmaps)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // No thread safety analysis due to lambdas.
  template<typename MarkVisitor, typename ReferenceVisitor>
  void ScanObjectVisit(mirror::Object* obj,
                       const MarkVisitor& visitor,
                       const ReferenceVisitor& ref_visitor)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SweepSystemWeaks(Thread* self)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void VerifySystemWeaks()
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                          size_t count,
                          const RootInfo& info) OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Marks an object.
  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns object if the object is marked in the heap bitmap, otherwise null.
  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(mirror::Object* obj,
                         mirror::Object* holder = nullptr,
                         MemberOffset offset = MemberOffset(0))
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(mirror::Object* obj)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifySuspendedThreadRoots(std::ostream& os)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Expand mark stack to 2x its current size.
  void ExpandMarkStack()
      REQUIRES(mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ResizeMarkStack(size_t new_size)
      REQUIRES(mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
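  // Illustrative relationship only (the real code lives in mark_sweep.cc): ExpandMarkStack()
  // is expected to be roughly ResizeMarkStack(2 * current capacity), with mark_stack_lock_
  // held as required above.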

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;
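  // The thread-count policy itself is defined in mark_sweep.cc; a plausible shape (illustrative,
  // not the actual heuristic) is: one thread when no GC thread pool is available, otherwise the
  // heap's configured parallel (paused) or concurrent (unpaused) GC thread count.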

  // Push a single reference on a mark stack.
  void PushOnMarkStack(mirror::Object* obj)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, uint8_t minimum_age)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  virtual void ProcessMarkStack()
      OVERRIDE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ProcessMarkStack(false);
  }

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES(!mark_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded
  // by IsExclusiveHeld.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static constexpr bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::ContinuousSpaceBitmap* current_space_bitmap_;
  // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
  accounting::HeapBitmap* mark_bitmap_;

  accounting::ObjectStack* mark_stack_;

  // Every object inside the immune spaces is assumed to be marked. Immune spaces that aren't in the
  // immune region are handled by the normal marking logic.
  ImmuneSpaces immune_spaces_;

  // Parallel finger.
  AtomicInteger atomic_finger_;

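  // Scanned-object counters. These appear to be diagnostic and are only expected to be
  // meaningfully updated when kCountScannedTypes is enabled (see the per-type comments below).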
  AtomicInteger no_reference_class_count_;
  AtomicInteger normal_count_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of object arrays scanned, if kCountScannedTypes.
  AtomicInteger object_array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  // Number of java.lang.ref.Reference instances.
  AtomicInteger reference_count_;

  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger mark_null_count_;
  AtomicInteger mark_immune_count_;
  AtomicInteger mark_fastpath_count_;
  AtomicInteger mark_slowpath_count_;

  std::unique_ptr<Barrier> gc_barrier_;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

  // Verification.
  size_t live_stack_freeze_size_;

  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;

 private:
  class CardScanTask;
  class CheckpointMarkThreadRoots;
  class DelayReferenceReferentVisitor;
  template<bool kUseFinger> class MarkStackTask;
  class MarkObjectSlowPath;
  class RecursiveMarkTask;
  class ScanObjectParallelVisitor;
  class ScanObjectVisitor;
  class VerifyRootMarkedVisitor;
  class VerifyRootVisitor;
  class VerifySystemWeakVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_