/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include <memory>

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "gc/accounting/heap_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  class Reference;
}  // namespace mirror

class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

class Heap;

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object*> ObjectStack;
}  // namespace accounting

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

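  // Runs the collection by driving the phase methods below in order: initialize, marking, pause,
  // reclaim and finish.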
  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
  void InitializePhase();
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultSpaceBitmap();

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkConcurrentRoots(VisitRootFlags flags)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

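  // Processes the references scheduled by DelayReferenceReferent once marking has completed.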
  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update and mark references from immune spaces.
  void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pre-clean cards to reduce how much work is needed in the pause.
  void PreCleanCards()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual because by default it
  // sweeps all allocation spaces; partial and sticky GCs want to sweep only a subset of the heap.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // No thread safety analysis due to lambdas.
  template<typename MarkVisitor, typename ReferenceVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                       const ReferenceVisitor& ref_visitor)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

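  // Static callbacks below forward to the MarkSweep instance passed in through 'arg'.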
  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                               RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyRootMarked(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                               RootType /*root_type*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                                       RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object.
  void MarkObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(mirror::Object* obj)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(mirror::Object* obj);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor *visitor, RootType root_type);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor,
                  RootType root_type) NO_THREAD_SAFETY_ANALYSIS;

  // Push a single reference on a mark stack.
  void PushOnMarkStack(mirror::Object* obj);

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
  // IsExclusiveHeld.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::ContinuousSpaceBitmap* current_space_bitmap_;
  // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
  accounting::HeapBitmap* mark_bitmap_;

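  // Stack of objects that have been marked but whose references have not yet been scanned.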
  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune range is assumed to be marked.
  ImmuneRegion immune_region_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
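  // The remaining counters are debugging/profiling statistics: large object space tests and
  // marks, parallel marking overhead and work chunks, references encountered, and the breakdown
  // of MarkObject calls (null, immune, fast path, slow path).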
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger mark_null_count_;
  AtomicInteger mark_immune_count_;
  AtomicInteger mark_fastpath_count_;
  AtomicInteger mark_slowpath_count_;

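  // Barrier used to synchronize with mutator threads at the checkpoint in MarkRootsCheckpoint();
  // exposed through GetBarrier().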
  std::unique_ptr<Barrier> gc_barrier_;
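  // Guards growth of the mark stack (see ExpandMarkStack() and ResizeMarkStack()).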
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

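  // True when this collector runs concurrently with the mutators (CMS), false for the
  // stop-the-world variant (MS); see GetCollectorType().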
  const bool is_concurrent_;

  // Verification.
  size_t live_stack_freeze_size_;

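  // Scratch memory mapping used by SweepArray() as a free buffer when batching up objects to free.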
  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class MarkObjectVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;
  friend class MarkSweepMarkObjectSlowPath;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_