/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // TODO: disable these flags for production use.
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = true;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = true;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;

  ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

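  // Entry point of the collector: RunPhases() drives InitializePhase(), the thread-root flip,
  // MarkingPhase(), ReclaimPhase() and FinishPhase(), in that order (see concurrent_copying.cc).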
  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);

  void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
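  // Marks from_ref and returns its to-space reference, copying the object first if necessary.
  // This is the slow path taken by the read barrier while the collector is marking.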
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() {
    return weak_ref_access_enabled_.LoadRelaxed();
  }
  void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_);
  void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void ClearBlackPtrs()
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_);
  void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
  void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
  void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
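  // Barrier used to wait for mutator threads to run GC checkpoints (e.g. the empty checkpoint
  // and thread-local mark stack revocation).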
  std::unique_ptr<Barrier> gc_barrier_;
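  // The GC's own mark stack. How it is accessed depends on mark_stack_mode_ below.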
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
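  // Thread-local mark stacks that have been revoked from mutator threads and are waiting to be
  // drained by the GC.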
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
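  // Sizing of the thread-local mark stacks and of the mark stack pool.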
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
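  // Pool of mark stacks that are handed out to mutator threads as thread-local mark stacks.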
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
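  // The thread that is running this garbage collection.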
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
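  // Mark bitmaps owned by this collector, set up in BindBitmaps(); region_space_bitmap_ is the
  // one covering the region space.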
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
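  // Snapshot of the live stack size taken at the pause (see RecordLiveStackFreezeSize()).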
  size_t live_stack_freeze_size_;
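  // From-space object/byte counts recorded at the first pause, used for the from-space
  // accounting check (kEnableFromSpaceAccountingCheck).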
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
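  // Non-zero once pushing onto the GC mark stack is disallowed; used to catch unexpected pushes
  // after marking has finished.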
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
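  // The current mark stack mode. During a collection it transitions from thread-local to shared
  // to GC-exclusive to off as marking winds down.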
  Atomic<MarkStackMode> mark_stack_mode_;
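  // Whether mutator threads may currently access weak references; disabled around reference
  // processing and re-enabled via ReenableWeakRefAccess().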
  Atomic<bool> weak_ref_access_enabled_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks/chunks that held object copies which went unused due to
  // lost races (CAS failures) when installing the forwarding pointer after a copy. They are
  // reused for later allocations (see AllocateInSkippedBlock()).
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

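  // The heap's read barrier table, used with the table-lookup read barrier.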
  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

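  // Helper visitor and checkpoint closure classes, defined in concurrent_copying.cc.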
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantObjectVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCheckpoint;
  class FlipCallback;
  class ImmuneSpaceObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsObjectVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class ThreadFlipVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_