      1 /*
      2  * Copyright (C) 2014 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "concurrent_copying.h"
     18 
     19 #include "art_field-inl.h"
     20 #include "base/enums.h"
     21 #include "base/file_utils.h"
     22 #include "base/histogram-inl.h"
     23 #include "base/quasi_atomic.h"
     24 #include "base/stl_util.h"
     25 #include "base/systrace.h"
     26 #include "debugger.h"
     27 #include "gc/accounting/atomic_stack.h"
     28 #include "gc/accounting/heap_bitmap-inl.h"
     29 #include "gc/accounting/mod_union_table-inl.h"
     30 #include "gc/accounting/read_barrier_table.h"
     31 #include "gc/accounting/space_bitmap-inl.h"
     32 #include "gc/gc_pause_listener.h"
     33 #include "gc/reference_processor.h"
     34 #include "gc/space/image_space.h"
     35 #include "gc/space/space-inl.h"
     36 #include "gc/verification.h"
     37 #include "image-inl.h"
     38 #include "intern_table.h"
     39 #include "mirror/class-inl.h"
     40 #include "mirror/object-inl.h"
     41 #include "mirror/object-refvisitor-inl.h"
     42 #include "scoped_thread_state_change-inl.h"
     43 #include "thread-inl.h"
     44 #include "thread_list.h"
     45 #include "well_known_classes.h"
     46 
     47 namespace art {
     48 namespace gc {
     49 namespace collector {
     50 
     51 static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
      52 // If kFilterModUnionCards is true, we attempt to filter out cards that don't need to be dirty in the mod
      53 // union table. Effectively disabled (debug builds only) since it does not seem to help the pause much.
     54 static constexpr bool kFilterModUnionCards = kIsDebugBuild;
      55 // If kDisallowReadBarrierDuringScan is true, the GC aborts if any read barriers occur during
      56 // ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
     57 // Only enabled for kIsDebugBuild to avoid performance hit.
     58 static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
      59 // Slow-path mark stack size. Increase this if the stack is getting full and is causing
      60 // performance problems.
     61 static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
     62 // Verify that there are no missing card marks.
     63 static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;
     64 
     65 ConcurrentCopying::ConcurrentCopying(Heap* heap,
     66                                      const std::string& name_prefix,
     67                                      bool measure_read_barrier_slow_path)
     68     : GarbageCollector(heap,
     69                        name_prefix + (name_prefix.empty() ? "" : " ") +
     70                        "concurrent copying"),
     71       region_space_(nullptr), gc_barrier_(new Barrier(0)),
     72       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
     73                                                      kDefaultGcMarkStackSize,
     74                                                      kDefaultGcMarkStackSize)),
     75       rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
     76                                                          kReadBarrierMarkStackSize,
     77                                                          kReadBarrierMarkStackSize)),
     78       rb_mark_bit_stack_full_(false),
     79       mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
     80       thread_running_gc_(nullptr),
     81       is_marking_(false),
     82       is_using_read_barrier_entrypoints_(false),
     83       is_active_(false),
     84       is_asserting_to_space_invariant_(false),
     85       region_space_bitmap_(nullptr),
     86       heap_mark_bitmap_(nullptr),
     87       live_stack_freeze_size_(0),
     88       from_space_num_objects_at_first_pause_(0),
     89       from_space_num_bytes_at_first_pause_(0),
     90       mark_stack_mode_(kMarkStackModeOff),
     91       weak_ref_access_enabled_(true),
     92       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
     93       measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
     94       mark_from_read_barrier_measurements_(false),
     95       rb_slow_path_ns_(0),
     96       rb_slow_path_count_(0),
     97       rb_slow_path_count_gc_(0),
     98       rb_slow_path_histogram_lock_("Read barrier histogram lock"),
     99       rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
    100       rb_slow_path_count_total_(0),
    101       rb_slow_path_count_gc_total_(0),
    102       rb_table_(heap_->GetReadBarrierTable()),
    103       force_evacuate_all_(false),
    104       gc_grays_immune_objects_(false),
    105       immune_gray_stack_lock_("concurrent copying immune gray stack lock",
    106                               kMarkSweepMarkStackLock) {
    107   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
    108                 "The region space size and the read barrier table region size must match");
    109   Thread* self = Thread::Current();
    110   {
    111     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     112     // Cache this so that we won't have to lock heap_bitmap_lock_ in
     113     // Mark(), which could otherwise cause a nested lock on heap_bitmap_lock_
     114     // when the GC triggers a read barrier while collecting, or a lock order violation
     115     // (class_linker_lock_ and heap_bitmap_lock_).
    116     heap_mark_bitmap_ = heap->GetMarkBitmap();
    117   }
    118   {
    119     MutexLock mu(self, mark_stack_lock_);
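             // Pre-allocate a pool of mark stacks that mutator threads can later take as their
             // thread-local mark stacks during marking (see PushOntoMarkStack).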
    120     for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
    121       accounting::AtomicStack<mirror::Object>* mark_stack =
    122           accounting::AtomicStack<mirror::Object>::Create(
    123               "thread local mark stack", kMarkStackSize, kMarkStackSize);
    124       pooled_mark_stacks_.push_back(mark_stack);
    125     }
    126   }
    127 }
    128 
    129 void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
    130                                           bool do_atomic_update) {
    131   if (UNLIKELY(do_atomic_update)) {
    132     // Used to mark the referent in DelayReferenceReferent in transaction mode.
    133     mirror::Object* from_ref = field->AsMirrorPtr();
    134     if (from_ref == nullptr) {
    135       return;
    136     }
    137     mirror::Object* to_ref = Mark(from_ref);
    138     if (from_ref != to_ref) {
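               // Install to_ref with a weak CAS, retrying on spurious failures, and give up if a
               // mutator has concurrently stored a different reference into the field.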
    139       do {
    140         if (field->AsMirrorPtr() != from_ref) {
    141           // Concurrently overwritten by a mutator.
    142           break;
    143         }
    144       } while (!field->CasWeakRelaxed(from_ref, to_ref));
    145     }
    146   } else {
     147     // Used for preserving soft references. It should be OK not to use a CAS here since no other
     148     // threads can trigger read barriers on the same referent during reference
     149     // processing.
    150     field->Assign(Mark(field->AsMirrorPtr()));
    151   }
    152 }
    153 
    154 ConcurrentCopying::~ConcurrentCopying() {
    155   STLDeleteElements(&pooled_mark_stacks_);
    156 }
    157 
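         // Top-level driver for a GC cycle: initialize, flip thread roots, mark concurrently,
         // optionally verify that no from-space refs remain, reclaim, and finish.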
    158 void ConcurrentCopying::RunPhases() {
    159   CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
    160   CHECK(!is_active_);
    161   is_active_ = true;
    162   Thread* self = Thread::Current();
    163   thread_running_gc_ = self;
    164   Locks::mutator_lock_->AssertNotHeld(self);
    165   {
    166     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    167     InitializePhase();
    168   }
    169   if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
    170     // Switch to read barrier mark entrypoints before we gray the objects. This is required in case
    171     // a mutator sees a gray bit and dispatches on the entrypoint. (b/37876887).
    172     ActivateReadBarrierEntrypoints();
    173     // Gray dirty immune objects concurrently to reduce GC pause times. We re-process gray cards in
    174     // the pause.
    175     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    176     GrayAllDirtyImmuneObjects();
    177   }
    178   FlipThreadRoots();
    179   {
    180     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    181     MarkingPhase();
    182   }
    183   // Verify no from space refs. This causes a pause.
    184   if (kEnableNoFromSpaceRefsVerification) {
    185     TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    186     ScopedPause pause(this, false);
    187     CheckEmptyMarkStack();
    188     if (kVerboseMode) {
    189       LOG(INFO) << "Verifying no from-space refs";
    190     }
    191     VerifyNoFromSpaceReferences();
    192     if (kVerboseMode) {
    193       LOG(INFO) << "Done verifying no from-space refs";
    194     }
    195     CheckEmptyMarkStack();
    196   }
    197   {
    198     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    199     ReclaimPhase();
    200   }
    201   FinishPhase();
    202   CHECK(is_active_);
    203   is_active_ = false;
    204   thread_running_gc_ = nullptr;
    205 }
    206 
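         // Checkpoint closure that switches a mutator thread over to the read barrier mark entrypoints.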
    207 class ConcurrentCopying::ActivateReadBarrierEntrypointsCheckpoint : public Closure {
    208  public:
    209   explicit ActivateReadBarrierEntrypointsCheckpoint(ConcurrentCopying* concurrent_copying)
    210       : concurrent_copying_(concurrent_copying) {}
    211 
    212   void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    213     // Note: self is not necessarily equal to thread since thread may be suspended.
    214     Thread* self = Thread::Current();
    215     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    216         << thread->GetState() << " thread " << thread << " self " << self;
    217     // Switch to the read barrier entrypoints.
    218     thread->SetReadBarrierEntrypoints();
    219     // If thread is a running mutator, then act on behalf of the garbage collector.
    220     // See the code in ThreadList::RunCheckpoint.
    221     concurrent_copying_->GetBarrier().Pass(self);
    222   }
    223 
    224  private:
    225   ConcurrentCopying* const concurrent_copying_;
    226 };
    227 
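         // Callback run while holding thread_list_lock_ inside RunCheckpoint(); records that the read
         // barrier entrypoints are now active.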
    228 class ConcurrentCopying::ActivateReadBarrierEntrypointsCallback : public Closure {
    229  public:
    230   explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
    231       : concurrent_copying_(concurrent_copying) {}
    232 
    233   void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    234     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
    235     // to avoid a race with ThreadList::Register().
    236     CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
    237     concurrent_copying_->is_using_read_barrier_entrypoints_ = true;
    238   }
    239 
    240  private:
    241   ConcurrentCopying* const concurrent_copying_;
    242 };
    243 
    244 void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
    245   Thread* const self = Thread::Current();
    246   ActivateReadBarrierEntrypointsCheckpoint checkpoint(this);
    247   ThreadList* thread_list = Runtime::Current()->GetThreadList();
    248   gc_barrier_->Init(self, 0);
    249   ActivateReadBarrierEntrypointsCallback callback(this);
    250   const size_t barrier_count = thread_list->RunCheckpoint(&checkpoint, &callback);
     251   // If there are no threads to wait for, all the checkpoint functions have already finished, so
     252   // there is no need to wait on the barrier.
    253   if (barrier_count == 0) {
    254     return;
    255   }
    256   ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    257   gc_barrier_->Increment(self, barrier_count);
    258 }
    259 
    260 void ConcurrentCopying::BindBitmaps() {
    261   Thread* self = Thread::Current();
    262   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    263   // Mark all of the spaces we never collect as immune.
    264   for (const auto& space : heap_->GetContinuousSpaces()) {
    265     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
    266         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
    267       CHECK(space->IsZygoteSpace() || space->IsImageSpace());
    268       immune_spaces_.AddSpace(space);
    269     } else if (space == region_space_) {
    270       // It is OK to clear the bitmap with mutators running since the only place it is read is
    271       // VisitObjects which has exclusion with CC.
    272       region_space_bitmap_ = region_space_->GetMarkBitmap();
    273       region_space_bitmap_->Clear();
    274     }
    275   }
    276 }
    277 
    278 void ConcurrentCopying::InitializePhase() {
    279   TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
    280   if (kVerboseMode) {
    281     LOG(INFO) << "GC InitializePhase";
    282     LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
    283               << reinterpret_cast<void*>(region_space_->Limit());
    284   }
    285   CheckEmptyMarkStack();
    286   if (kIsDebugBuild) {
    287     MutexLock mu(Thread::Current(), mark_stack_lock_);
    288     CHECK(false_gray_stack_.empty());
    289   }
    290 
    291   rb_mark_bit_stack_full_ = false;
    292   mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
    293   if (measure_read_barrier_slow_path_) {
    294     rb_slow_path_ns_.StoreRelaxed(0);
    295     rb_slow_path_count_.StoreRelaxed(0);
    296     rb_slow_path_count_gc_.StoreRelaxed(0);
    297   }
    298 
    299   immune_spaces_.Reset();
    300   bytes_moved_.StoreRelaxed(0);
    301   objects_moved_.StoreRelaxed(0);
    302   GcCause gc_cause = GetCurrentIteration()->GetGcCause();
    303   if (gc_cause == kGcCauseExplicit ||
    304       gc_cause == kGcCauseCollectorTransition ||
    305       GetCurrentIteration()->GetClearSoftReferences()) {
    306     force_evacuate_all_ = true;
    307   } else {
    308     force_evacuate_all_ = false;
    309   }
    310   if (kUseBakerReadBarrier) {
    311     updated_all_immune_objects_.StoreRelaxed(false);
    312     // GC may gray immune objects in the thread flip.
    313     gc_grays_immune_objects_ = true;
    314     if (kIsDebugBuild) {
    315       MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    316       DCHECK(immune_gray_stack_.empty());
    317     }
    318   }
    319   BindBitmaps();
    320   if (kVerboseMode) {
    321     LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    322     LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
    323               << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    324     for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    325       LOG(INFO) << "Immune space: " << *space;
    326     }
    327     LOG(INFO) << "GC end of InitializePhase";
    328   }
    329   // Mark all of the zygote large objects without graying them.
    330   MarkZygoteLargeObjects();
    331 }
    332 
    333 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
    334 class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
    335  public:
    336   ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
    337       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
    338   }
    339 
    340   virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    341     // Note: self is not necessarily equal to thread since thread may be suspended.
    342     Thread* self = Thread::Current();
    343     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    344         << thread->GetState() << " thread " << thread << " self " << self;
    345     thread->SetIsGcMarkingAndUpdateEntrypoints(true);
    346     if (use_tlab_ && thread->HasTlab()) {
    347       if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
    348         // This must come before the revoke.
    349         size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
    350         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
    351         reinterpret_cast<Atomic<size_t>*>(
    352             &concurrent_copying_->from_space_num_objects_at_first_pause_)->
    353                 FetchAndAddSequentiallyConsistent(thread_local_objects);
    354       } else {
    355         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
    356       }
    357     }
    358     if (kUseThreadLocalAllocationStack) {
    359       thread->RevokeThreadLocalAllocationStack();
    360     }
    361     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    362     // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    363     // only.
    364     thread->VisitRoots(this, kVisitRootFlagAllRoots);
    365     concurrent_copying_->GetBarrier().Pass(self);
    366   }
    367 
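           // Mark each root and update it in place if the referenced object moved.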
    368   void VisitRoots(mirror::Object*** roots,
    369                   size_t count,
    370                   const RootInfo& info ATTRIBUTE_UNUSED)
    371       REQUIRES_SHARED(Locks::mutator_lock_) {
    372     for (size_t i = 0; i < count; ++i) {
    373       mirror::Object** root = roots[i];
    374       mirror::Object* ref = *root;
    375       if (ref != nullptr) {
    376         mirror::Object* to_ref = concurrent_copying_->Mark(ref);
    377         if (to_ref != ref) {
    378           *root = to_ref;
    379         }
    380       }
    381     }
    382   }
    383 
    384   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
    385                   size_t count,
    386                   const RootInfo& info ATTRIBUTE_UNUSED)
    387       REQUIRES_SHARED(Locks::mutator_lock_) {
    388     for (size_t i = 0; i < count; ++i) {
    389       mirror::CompressedReference<mirror::Object>* const root = roots[i];
    390       if (!root->IsNull()) {
    391         mirror::Object* ref = root->AsMirrorPtr();
    392         mirror::Object* to_ref = concurrent_copying_->Mark(ref);
    393         if (to_ref != ref) {
    394           root->Assign(to_ref);
    395         }
    396       }
    397     }
    398   }
    399 
    400  private:
    401   ConcurrentCopying* const concurrent_copying_;
    402   const bool use_tlab_;
    403 };
    404 
    405 // Called back from Runtime::FlipThreadRoots() during a pause.
    406 class ConcurrentCopying::FlipCallback : public Closure {
    407  public:
    408   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
    409       : concurrent_copying_(concurrent_copying) {
    410   }
    411 
    412   virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    413     ConcurrentCopying* cc = concurrent_copying_;
    414     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    415     // Note: self is not necessarily equal to thread since thread may be suspended.
    416     Thread* self = Thread::Current();
    417     if (kVerifyNoMissingCardMarks) {
    418       cc->VerifyNoMissingCardMarks();
    419     }
    420     CHECK_EQ(thread, self);
    421     Locks::mutator_lock_->AssertExclusiveHeld(self);
    422     {
    423       TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
    424       cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    425     }
    426     cc->SwapStacks();
    427     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
    428       cc->RecordLiveStackFreezeSize(self);
    429       cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
    430       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    431     }
    432     cc->is_marking_ = true;
    433     cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    434     if (kIsDebugBuild) {
    435       cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    436     }
    437     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
    438       CHECK(Runtime::Current()->IsAotCompiler());
    439       TimingLogger::ScopedTiming split3("(Paused)VisitTransactionRoots", cc->GetTimings());
    440       Runtime::Current()->VisitTransactionRoots(cc);
    441     }
    442     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
    443       cc->GrayAllNewlyDirtyImmuneObjects();
    444       if (kIsDebugBuild) {
    445         // Check that all non-gray immune objects only reference immune objects.
    446         cc->VerifyGrayImmuneObjects();
    447       }
    448     }
     449     // May be null during runtime creation; in this case leave java_lang_Object_ null.
     450     // This is safe since single-threaded behavior should mean FillDummyObject does not
     451     // happen while java_lang_Object_ is null.
    452     if (WellKnownClasses::java_lang_Object != nullptr) {
    453       cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
    454           WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
    455     } else {
    456       cc->java_lang_Object_ = nullptr;
    457     }
    458   }
    459 
    460  private:
    461   ConcurrentCopying* const concurrent_copying_;
    462 };
    463 
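         // Debug-build visitor: checks that a non-gray immune object only references objects in the
         // immune spaces (or zygote large objects).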
    464 class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
    465  public:
    466   explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
    467       : collector_(collector) {}
    468 
    469   void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
    470       const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
    471       REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    472     CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
    473                    obj, offset);
    474   }
    475 
    476   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
    477       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    478     CHECK(klass->IsTypeOfReferenceClass());
    479     CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
    480                    ref,
    481                    mirror::Reference::ReferentOffset());
    482   }
    483 
    484   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
    485       ALWAYS_INLINE
    486       REQUIRES_SHARED(Locks::mutator_lock_) {
    487     if (!root->IsNull()) {
    488       VisitRoot(root);
    489     }
    490   }
    491 
    492   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
    493       ALWAYS_INLINE
    494       REQUIRES_SHARED(Locks::mutator_lock_) {
    495     CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
    496   }
    497 
    498  private:
    499   ConcurrentCopying* const collector_;
    500 
    501   void CheckReference(ObjPtr<mirror::Object> ref,
    502                       ObjPtr<mirror::Object> holder,
    503                       MemberOffset offset) const
    504       REQUIRES_SHARED(Locks::mutator_lock_) {
    505     if (ref != nullptr) {
    506       if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
    507         // Not immune, must be a zygote large object.
    508         CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
    509             Thread::Current(), ref.Ptr()))
    510             << "Non gray object references non immune, non zygote large object "<< ref << " "
    511             << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
    512             << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
    513       } else {
    514         // Make sure the large object class is immune since we will never scan the large object.
    515         CHECK(collector_->immune_spaces_.ContainsObject(
    516             ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
    517       }
    518     }
    519   }
    520 };
    521 
    522 void ConcurrentCopying::VerifyGrayImmuneObjects() {
    523   TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    524   for (auto& space : immune_spaces_.GetSpaces()) {
    525     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    526     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    527     VerifyGrayImmuneObjectsVisitor visitor(this);
    528     live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
    529                                   reinterpret_cast<uintptr_t>(space->Limit()),
    530                                   [&visitor](mirror::Object* obj)
    531         REQUIRES_SHARED(Locks::mutator_lock_) {
    532       // If an object is not gray, it should only have references to things in the immune spaces.
    533       if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
    534         obj->VisitReferences</*kVisitNativeRoots*/true,
    535                              kDefaultVerifyFlags,
    536                              kWithoutReadBarrier>(visitor, visitor);
    537       }
    538     });
    539   }
    540 }
    541 
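         // Debug-build visitor used on objects whose card is clean: checks that they hold no references
         // into newly allocated regions, which would indicate a missing card mark.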
    542 class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
    543  public:
    544   VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
    545     : cc_(cc),
    546       holder_(holder) {}
    547 
    548   void operator()(ObjPtr<mirror::Object> obj,
    549                   MemberOffset offset,
    550                   bool is_static ATTRIBUTE_UNUSED) const
    551       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    552     if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
    553      CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
    554          offset), offset.Uint32Value());
    555     }
    556   }
    557   void operator()(ObjPtr<mirror::Class> klass,
    558                   ObjPtr<mirror::Reference> ref) const
    559       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    560     CHECK(klass->IsTypeOfReferenceClass());
    561     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
    562   }
    563 
    564   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
    565       REQUIRES_SHARED(Locks::mutator_lock_) {
    566     if (!root->IsNull()) {
    567       VisitRoot(root);
    568     }
    569   }
    570 
    571   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
    572       REQUIRES_SHARED(Locks::mutator_lock_) {
    573     CheckReference(root->AsMirrorPtr());
    574   }
    575 
    576   void CheckReference(mirror::Object* ref, int32_t offset = -1) const
    577       REQUIRES_SHARED(Locks::mutator_lock_) {
    578     CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
    579         << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
    580         << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
    581   }
    582 
    583  private:
    584   ConcurrentCopying* const cc_;
    585   ObjPtr<mirror::Object> const holder_;
    586 };
    587 
    588 void ConcurrentCopying::VerifyNoMissingCardMarks() {
    589   auto visitor = [&](mirror::Object* obj)
    590       REQUIRES(Locks::mutator_lock_)
    591       REQUIRES(!mark_stack_lock_) {
    592     // Objects not on dirty or aged cards should never have references to newly allocated regions.
    593     if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
    594       VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
    595       obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
    596           internal_visitor, internal_visitor);
    597     }
    598   };
    599   TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    600   region_space_->Walk(visitor);
    601   {
    602     ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
    603     heap_->GetLiveBitmap()->Visit(visitor);
    604   }
    605 }
    606 
     607 // Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
    608 void ConcurrentCopying::FlipThreadRoots() {
    609   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
    610   if (kVerboseMode) {
    611     LOG(INFO) << "time=" << region_space_->Time();
    612     region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
    613   }
    614   Thread* self = Thread::Current();
    615   Locks::mutator_lock_->AssertNotHeld(self);
    616   gc_barrier_->Init(self, 0);
    617   ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
    618   FlipCallback flip_callback(this);
    619 
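           // FlipThreadRoots() pauses the mutators, runs flip_callback during the pause, and has the
           // thread roots flipped via thread_flip_visitor; the returned count is used to wait on
           // gc_barrier_ below.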
    620   size_t barrier_count = Runtime::Current()->GetThreadList()->FlipThreadRoots(
    621       &thread_flip_visitor, &flip_callback, this, GetHeap()->GetGcPauseListener());
    622 
    623   {
    624     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    625     gc_barrier_->Increment(self, barrier_count);
    626   }
    627   is_asserting_to_space_invariant_ = true;
    628   QuasiAtomic::ThreadFenceForConstructor();
    629   if (kVerboseMode) {
    630     LOG(INFO) << "time=" << region_space_->Time();
    631     region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
    632     LOG(INFO) << "GC end of FlipThreadRoots";
    633   }
    634 }
    635 
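         // Visitor that grays white immune-space objects. kConcurrent selects between an atomic
         // read-barrier-state update (mutators running) and a plain store (mutators paused).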
    636 template <bool kConcurrent>
    637 class ConcurrentCopying::GrayImmuneObjectVisitor {
    638  public:
    639   explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
    640 
    641   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    642     if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::WhiteState()) {
    643       if (kConcurrent) {
    644         Locks::mutator_lock_->AssertSharedHeld(self_);
    645         obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
    646         // Mod union table VisitObjects may visit the same object multiple times so we can't check
    647         // the result of the atomic set.
    648       } else {
    649         Locks::mutator_lock_->AssertExclusiveHeld(self_);
    650         obj->SetReadBarrierState(ReadBarrier::GrayState());
    651       }
    652     }
    653   }
    654 
    655   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    656     reinterpret_cast<GrayImmuneObjectVisitor<kConcurrent>*>(arg)->operator()(obj);
    657   }
    658 
    659  private:
    660   Thread* const self_;
    661 };
    662 
    663 void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
    664   TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings());
    665   accounting::CardTable* const card_table = heap_->GetCardTable();
    666   Thread* const self = Thread::Current();
    667   using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>;
    668   VisitorType visitor(self);
    669   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    670   for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    671     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    672     accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
     673     // Mark all the objects on dirty cards since these may point to objects in other spaces.
    674     // Once these are marked, the GC will eventually clear them later.
    675     // Table is non null for boot image and zygote spaces. It is only null for application image
    676     // spaces.
    677     if (table != nullptr) {
    678       table->ProcessCards();
    679       table->VisitObjects(&VisitorType::Callback, &visitor);
    680       // Don't clear cards here since we need to rescan in the pause. If we cleared the cards here,
    681       // there would be races with the mutator marking new cards.
    682     } else {
    683       // Keep cards aged if we don't have a mod-union table since we may need to scan them in future
    684       // GCs. This case is for app images.
    685       card_table->ModifyCardsAtomic(
    686           space->Begin(),
    687           space->End(),
    688           [](uint8_t card) {
    689             return (card != gc::accounting::CardTable::kCardClean)
    690                 ? gc::accounting::CardTable::kCardAged
    691                 : card;
    692           },
    693           /* card modified visitor */ VoidFunctor());
    694       card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
    695                                                space->Begin(),
    696                                                space->End(),
    697                                                visitor,
    698                                                gc::accounting::CardTable::kCardAged);
    699     }
    700   }
    701 }
    702 
    703 void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() {
    704   TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings());
    705   accounting::CardTable* const card_table = heap_->GetCardTable();
    706   using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>;
    707   Thread* const self = Thread::Current();
    708   VisitorType visitor(self);
    709   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    710   for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    711     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    712     accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    713 
    714     // Don't need to scan aged cards since we did these before the pause. Note that scanning cards
    715     // also handles the mod-union table cards.
    716     card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(),
    717                                              space->Begin(),
    718                                              space->End(),
    719                                              visitor,
    720                                              gc::accounting::CardTable::kCardDirty);
    721     if (table != nullptr) {
    722       // Add the cards to the mod-union table so that we can clear cards to save RAM.
    723       table->ProcessCards();
    724       TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
    725       card_table->ClearCardRange(space->Begin(),
    726                                  AlignDown(space->End(), accounting::CardTable::kCardSize));
    727     }
    728   }
    729   // Since all of the objects that may point to other spaces are gray, we can avoid all the read
    730   // barriers in the immune spaces.
    731   updated_all_immune_objects_.StoreRelaxed(true);
    732 }
    733 
    734 void ConcurrentCopying::SwapStacks() {
    735   heap_->SwapStacks();
    736 }
    737 
    738 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
    739   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    740   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    741 }
    742 
    743 // Used to visit objects in the immune spaces.
    744 inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
    745   DCHECK(obj != nullptr);
    746   DCHECK(immune_spaces_.ContainsObject(obj));
    747   // Update the fields without graying it or pushing it onto the mark stack.
    748   Scan(obj);
    749 }
    750 
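         // Visitor that scans an object in an immune space. With Baker read barriers and gray dirty
         // immune objects, only gray objects need scanning and they are set back to white afterwards.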
    751 class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
    752  public:
    753   explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
    754       : collector_(cc) {}
    755 
    756   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    757     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
    758       // Only need to scan gray objects.
    759       if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
    760         collector_->ScanImmuneObject(obj);
    761         // Done scanning the object, go back to white.
    762         bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
    763                                                       ReadBarrier::WhiteState());
    764         CHECK(success)
    765             << Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
    766       }
    767     } else {
    768       collector_->ScanImmuneObject(obj);
    769     }
    770   }
    771 
    772   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    773     reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
    774   }
    775 
    776  private:
    777   ConcurrentCopying* const collector_;
    778 };
    779 
    780 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
    781 void ConcurrentCopying::MarkingPhase() {
    782   TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
    783   if (kVerboseMode) {
    784     LOG(INFO) << "GC MarkingPhase";
    785   }
    786   Thread* self = Thread::Current();
    787   if (kIsDebugBuild) {
    788     MutexLock mu(self, *Locks::thread_list_lock_);
    789     CHECK(weak_ref_access_enabled_);
    790   }
    791 
    792   // Scan immune spaces.
    793   // Update all the fields in the immune spaces first without graying the objects so that we
    794   // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
    795   // of the objects.
    796   if (kUseBakerReadBarrier) {
    797     gc_grays_immune_objects_ = false;
    798   }
    799   {
    800     TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    801     for (auto& space : immune_spaces_.GetSpaces()) {
    802       DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    803       accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    804       accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    805       ImmuneSpaceScanObjVisitor visitor(this);
    806       if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
    807         table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
    808       } else {
    809         // TODO: Scan only the aged cards.
    810         live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
    811                                       reinterpret_cast<uintptr_t>(space->Limit()),
    812                                       visitor);
    813       }
    814     }
    815   }
    816   if (kUseBakerReadBarrier) {
     817     // This release fence makes the field updates in the above loop visible before allowing mutators
     818     // to access immune objects without graying them first.
    819     updated_all_immune_objects_.StoreRelease(true);
    820     // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    821     // the above loop because we would incorrectly disable the read barrier by whitening an object
    822     // which may point to an unscanned, white object, breaking the to-space invariant.
    823     //
    824     // Make sure no mutators are in the middle of marking an immune object before whitening immune
    825     // objects.
    826     IssueEmptyCheckpoint();
    827     MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    828     if (kVerboseMode) {
    829       LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    830     }
    831     for (mirror::Object* obj : immune_gray_stack_) {
    832       DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
    833       bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
    834                                                     ReadBarrier::WhiteState());
    835       DCHECK(success);
    836     }
    837     immune_gray_stack_.clear();
    838   }
    839 
    840   {
    841     TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    842     Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
    843   }
    844   {
    845     // TODO: don't visit the transaction roots if it's not active.
    846     TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    847     Runtime::Current()->VisitNonThreadRoots(this);
    848   }
    849 
    850   {
    851     TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
     852     // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
     853     // primary reasons are that we need a checkpoint to process thread-local mark
     854     // stacks, that after we disable weak ref accesses we can't use a checkpoint (running threads
     855     // may be blocked at WaitHoldingLocks, which would deadlock), and that once we
     856     // reach the point where we process weak references, we can avoid using a lock when accessing
     857     // the GC mark stack, which makes mark stack processing more efficient.
    858 
    859     // Process the mark stack once in the thread local stack mode. This marks most of the live
    860     // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system
     861     // weaks) that may happen concurrently while we are processing the mark stack; such accesses may
     862     // newly mark/gray objects and push refs onto the mark stack.
    863     ProcessMarkStack();
    864     // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    865     // for the last time before transitioning to the shared mark stack mode, which would process new
    866     // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    867     // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    868     // important to do these together in a single checkpoint so that we can ensure that mutators
    869     // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    870     // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    871     // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    872     // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    873     SwitchToSharedMarkStackMode();
    874     CHECK(!self->GetWeakRefAccessEnabled());
    875     // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
    876     // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    877     // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    878     // (via read barriers) have no way to produce any more refs to process. Marking converges once
    879     // before we process weak refs below.
    880     ProcessMarkStack();
    881     CheckEmptyMarkStack();
    882     // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    883     // lock from this point on.
    884     SwitchToGcExclusiveMarkStackMode();
    885     CheckEmptyMarkStack();
    886     if (kVerboseMode) {
    887       LOG(INFO) << "ProcessReferences";
    888     }
    889     // Process weak references. This may produce new refs to process and have them processed via
    890     // ProcessMarkStack (in the GC exclusive mark stack mode).
    891     ProcessReferences(self);
    892     CheckEmptyMarkStack();
    893     if (kVerboseMode) {
    894       LOG(INFO) << "SweepSystemWeaks";
    895     }
    896     SweepSystemWeaks(self);
    897     if (kVerboseMode) {
    898       LOG(INFO) << "SweepSystemWeaks done";
    899     }
    900     // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
     901     // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
    902     // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    903     ProcessMarkStack();
    904     CheckEmptyMarkStack();
    905     // Re-enable weak ref accesses.
    906     ReenableWeakRefAccess(self);
    907     // Free data for class loaders that we unloaded.
    908     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    909     // Marking is done. Disable marking.
    910     DisableMarking();
    911     if (kUseBakerReadBarrier) {
    912       ProcessFalseGrayStack();
    913     }
    914     CheckEmptyMarkStack();
    915   }
    916 
    917   if (kIsDebugBuild) {
    918     MutexLock mu(self, *Locks::thread_list_lock_);
    919     CHECK(weak_ref_access_enabled_);
    920   }
    921   if (kVerboseMode) {
    922     LOG(INFO) << "GC end of MarkingPhase";
    923   }
    924 }
    925 
    926 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
    927   if (kVerboseMode) {
    928     LOG(INFO) << "ReenableWeakRefAccess";
    929   }
    930   // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
    931   {
    932     MutexLock mu(self, *Locks::thread_list_lock_);
    933     weak_ref_access_enabled_ = true;  // This is for new threads.
    934     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    935     for (Thread* thread : thread_list) {
    936       thread->SetWeakRefAccessEnabled(true);
    937     }
    938   }
    939   // Unblock blocking threads.
    940   GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
    941   Runtime::Current()->BroadcastForNewSystemWeaks();
    942 }
    943 
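         // Checkpoint closure that clears each thread's is_gc_marking flag and restores the normal
         // (non-marking) entrypoints.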
    944 class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
    945  public:
    946   explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
    947       : concurrent_copying_(concurrent_copying) {
    948   }
    949 
    950   void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    951     // Note: self is not necessarily equal to thread since thread may be suspended.
    952     Thread* self = Thread::Current();
    953     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    954         << thread->GetState() << " thread " << thread << " self " << self;
    955     // Disable the thread-local is_gc_marking flag.
     956     // Note that a thread which has just started right before this checkpoint may already have this
     957     // flag set to false, which is OK.
    958     thread->SetIsGcMarkingAndUpdateEntrypoints(false);
    959     // If thread is a running mutator, then act on behalf of the garbage collector.
    960     // See the code in ThreadList::RunCheckpoint.
    961     concurrent_copying_->GetBarrier().Pass(self);
    962   }
    963 
    964  private:
    965   ConcurrentCopying* const concurrent_copying_;
    966 };
    967 
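         // Callback run while holding thread_list_lock_ inside RunCheckpoint(); clears the global
         // is_marking_ flag (and the read barrier entrypoint flag when dirty immune objects are grayed).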
    968 class ConcurrentCopying::DisableMarkingCallback : public Closure {
    969  public:
    970   explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
    971       : concurrent_copying_(concurrent_copying) {
    972   }
    973 
    974   void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    975     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
    976     // to avoid a race with ThreadList::Register().
    977     CHECK(concurrent_copying_->is_marking_);
    978     concurrent_copying_->is_marking_ = false;
    979     if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
    980       CHECK(concurrent_copying_->is_using_read_barrier_entrypoints_);
    981       concurrent_copying_->is_using_read_barrier_entrypoints_ = false;
    982     } else {
    983       CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
    984     }
    985   }
    986 
    987  private:
    988   ConcurrentCopying* const concurrent_copying_;
    989 };
    990 
    991 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
    992   Thread* self = Thread::Current();
    993   DisableMarkingCheckpoint check_point(this);
    994   ThreadList* thread_list = Runtime::Current()->GetThreadList();
    995   gc_barrier_->Init(self, 0);
    996   DisableMarkingCallback dmc(this);
    997   size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
     998   // If there are no threads to wait for, which implies that all the checkpoint functions have
     999   // finished, then there is no need to release the mutator lock.
   1000   if (barrier_count == 0) {
   1001     return;
   1002   }
   1003   // Release locks then wait for all mutator threads to pass the barrier.
   1004   Locks::mutator_lock_->SharedUnlock(self);
   1005   {
   1006     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
   1007     gc_barrier_->Increment(self, barrier_count);
   1008   }
   1009   Locks::mutator_lock_->SharedLock(self);
   1010 }
   1011 
   1012 void ConcurrentCopying::DisableMarking() {
   1013   // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
   1014   // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
   1015   // cached in a local variable.
   1016   IssueDisableMarkingCheckpoint();
   1017   if (kUseTableLookupReadBarrier) {
   1018     heap_->rb_table_->ClearAll();
   1019     DCHECK(heap_->rb_table_->IsAllCleared());
   1020   }
   1021   is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
   1022   mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
   1023 }
   1024 
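         // Record an object whose read barrier state was set to gray even though it did not need to be;
         // ProcessFalseGrayStack() changes these back to white once marking is disabled.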
   1025 void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
   1026   CHECK(kUseBakerReadBarrier);
   1027   DCHECK(ref != nullptr);
   1028   MutexLock mu(Thread::Current(), mark_stack_lock_);
   1029   false_gray_stack_.push_back(ref);
   1030 }
   1031 
   1032 void ConcurrentCopying::ProcessFalseGrayStack() {
   1033   CHECK(kUseBakerReadBarrier);
   1034   // Change the objects on the false gray stack from gray to white.
   1035   MutexLock mu(Thread::Current(), mark_stack_lock_);
   1036   for (mirror::Object* obj : false_gray_stack_) {
   1037     DCHECK(IsMarked(obj));
   1038     // The object could be white here if a thread got preempted after a success at the
   1039     // AtomicSetReadBarrierState in Mark(), GC started marking through it (but not finished so
   1040     // still gray), and the thread ran to register it onto the false gray stack.
   1041     if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
   1042       bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
   1043                                                     ReadBarrier::WhiteState());
   1044       DCHECK(success);
   1045     }
   1046   }
   1047   false_gray_stack_.clear();
   1048 }
   1049 
   1050 void ConcurrentCopying::IssueEmptyCheckpoint() {
   1051   Thread* self = Thread::Current();
   1052   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   1053   // Release locks then wait for all mutator threads to pass the barrier.
   1054   Locks::mutator_lock_->SharedUnlock(self);
   1055   thread_list->RunEmptyCheckpoint();
   1056   Locks::mutator_lock_->SharedLock(self);
   1057 }
   1058 
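         // Grow the GC mark stack when it fills up: save the current entries, double the capacity, and
         // re-push the saved entries (Resize() does not preserve the contents).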
   1059 void ConcurrentCopying::ExpandGcMarkStack() {
   1060   DCHECK(gc_mark_stack_->IsFull());
   1061   const size_t new_size = gc_mark_stack_->Capacity() * 2;
   1062   std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
   1063                                                    gc_mark_stack_->End());
   1064   gc_mark_stack_->Resize(new_size);
   1065   for (auto& ref : temp) {
   1066     gc_mark_stack_->PushBack(ref.AsMirrorPtr());
   1067   }
   1068   DCHECK(!gc_mark_stack_->IsFull());
   1069 }
   1070 
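         // Push a newly marked reference onto the mark stack appropriate for the current mode: the GC
         // mark stack or a thread-local mark stack in thread-local mode, the shared GC mark stack under
         // mark_stack_lock_ in shared mode, and the GC mark stack without a lock in GC-exclusive mode.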
   1071 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
   1072   CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
   1073       << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
   1074   Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
   1075   CHECK(thread_running_gc_ != nullptr);
   1076   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1077   if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
   1078     if (LIKELY(self == thread_running_gc_)) {
   1079       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
   1080       CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1081       if (UNLIKELY(gc_mark_stack_->IsFull())) {
   1082         ExpandGcMarkStack();
   1083       }
   1084       gc_mark_stack_->PushBack(to_ref);
   1085     } else {
   1086       // Otherwise, use a thread-local mark stack.
   1087       accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
   1088       if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
   1089         MutexLock mu(self, mark_stack_lock_);
   1090         // Get a new thread local mark stack.
   1091         accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
   1092         if (!pooled_mark_stacks_.empty()) {
   1093           // Use a pooled mark stack.
   1094           new_tl_mark_stack = pooled_mark_stacks_.back();
   1095           pooled_mark_stacks_.pop_back();
   1096         } else {
   1097           // None pooled. Create a new one.
   1098           new_tl_mark_stack =
   1099               accounting::AtomicStack<mirror::Object>::Create(
   1100                   "thread local mark stack", 4 * KB, 4 * KB);
   1101         }
   1102         DCHECK(new_tl_mark_stack != nullptr);
   1103         DCHECK(new_tl_mark_stack->IsEmpty());
   1104         new_tl_mark_stack->PushBack(to_ref);
   1105         self->SetThreadLocalMarkStack(new_tl_mark_stack);
   1106         if (tl_mark_stack != nullptr) {
   1107           // Store the old full stack into a vector.
   1108           revoked_mark_stacks_.push_back(tl_mark_stack);
   1109         }
   1110       } else {
   1111         tl_mark_stack->PushBack(to_ref);
   1112       }
   1113     }
   1114   } else if (mark_stack_mode == kMarkStackModeShared) {
   1115     // Access the shared GC mark stack with a lock.
   1116     MutexLock mu(self, mark_stack_lock_);
   1117     if (UNLIKELY(gc_mark_stack_->IsFull())) {
   1118       ExpandGcMarkStack();
   1119     }
   1120     gc_mark_stack_->PushBack(to_ref);
   1121   } else {
   1122     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
   1123              static_cast<uint32_t>(kMarkStackModeGcExclusive))
   1124         << "ref=" << to_ref
   1125         << " self->gc_marking=" << self->GetIsGcMarking()
   1126         << " cc->is_marking=" << is_marking_;
   1127     CHECK(self == thread_running_gc_)
   1128         << "Only GC-running thread should access the mark stack "
   1129         << "in the GC exclusive mark stack mode";
   1130     // Access the GC mark stack without a lock.
   1131     if (UNLIKELY(gc_mark_stack_->IsFull())) {
   1132       ExpandGcMarkStack();
   1133     }
   1134     gc_mark_stack_->PushBack(to_ref);
   1135   }
   1136 }
   1137 
   1138 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
   1139   return heap_->allocation_stack_.get();
   1140 }
   1141 
   1142 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
   1143   return heap_->live_stack_.get();
   1144 }
   1145 
    1146 // The following visitors are used to verify that there are no references to the from-space left
    1147 // after marking.
   1148 class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
   1149  public:
   1150   explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
   1151       : collector_(collector) {}
   1152 
   1153   void operator()(mirror::Object* ref,
   1154                   MemberOffset offset = MemberOffset(0),
   1155                   mirror::Object* holder = nullptr) const
   1156       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1157     if (ref == nullptr) {
   1158       // OK.
   1159       return;
   1160     }
   1161     collector_->AssertToSpaceInvariant(holder, offset, ref);
   1162     if (kUseBakerReadBarrier) {
   1163       CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
   1164           << "Ref " << ref << " " << ref->PrettyTypeOf()
   1165           << " has non-white rb_state ";
   1166     }
   1167   }
   1168 
   1169   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
   1170       OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
   1171     DCHECK(root != nullptr);
   1172     operator()(root);
   1173   }
   1174 
   1175  private:
   1176   ConcurrentCopying* const collector_;
   1177 };
   1178 
   1179 class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
   1180  public:
   1181   explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
   1182       : collector_(collector) {}
   1183 
   1184   void operator()(ObjPtr<mirror::Object> obj,
   1185                   MemberOffset offset,
   1186                   bool is_static ATTRIBUTE_UNUSED) const
   1187       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1188     mirror::Object* ref =
   1189         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
   1190     VerifyNoFromSpaceRefsVisitor visitor(collector_);
   1191     visitor(ref, offset, obj.Ptr());
   1192   }
   1193   void operator()(ObjPtr<mirror::Class> klass,
   1194                   ObjPtr<mirror::Reference> ref) const
   1195       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1196     CHECK(klass->IsTypeOfReferenceClass());
   1197     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
   1198   }
   1199 
   1200   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
   1201       REQUIRES_SHARED(Locks::mutator_lock_) {
   1202     if (!root->IsNull()) {
   1203       VisitRoot(root);
   1204     }
   1205   }
   1206 
   1207   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
   1208       REQUIRES_SHARED(Locks::mutator_lock_) {
   1209     VerifyNoFromSpaceRefsVisitor visitor(collector_);
   1210     visitor(root->AsMirrorPtr());
   1211   }
   1212 
   1213  private:
   1214   ConcurrentCopying* const collector_;
   1215 };
   1216 
   1217 // Verify there's no from-space references left after the marking phase.
   1218 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
   1219   Thread* self = Thread::Current();
   1220   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
    1221   // Verify that all threads have is_gc_marking set to false.
   1222   {
   1223     MutexLock mu(self, *Locks::thread_list_lock_);
   1224     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
   1225     for (Thread* thread : thread_list) {
   1226       CHECK(!thread->GetIsGcMarking());
   1227     }
   1228   }
   1229 
   1230   auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
   1231       REQUIRES_SHARED(Locks::mutator_lock_) {
   1232     CHECK(obj != nullptr);
   1233     space::RegionSpace* region_space = RegionSpace();
   1234     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
   1235     VerifyNoFromSpaceRefsFieldVisitor visitor(this);
   1236     obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
   1237         visitor,
   1238         visitor);
   1239     if (kUseBakerReadBarrier) {
   1240       CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
   1241           << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
   1242     }
   1243   };
   1244   // Roots.
   1245   {
   1246     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1247     VerifyNoFromSpaceRefsVisitor ref_visitor(this);
   1248     Runtime::Current()->VisitRoots(&ref_visitor);
   1249   }
   1250   // The to-space.
   1251   region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
   1252   // Non-moving spaces.
   1253   {
   1254     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1255     heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
   1256   }
   1257   // The alloc stack.
   1258   {
   1259     VerifyNoFromSpaceRefsVisitor ref_visitor(this);
   1260     for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
   1261         it < end; ++it) {
   1262       mirror::Object* const obj = it->AsMirrorPtr();
   1263       if (obj != nullptr && obj->GetClass() != nullptr) {
   1264         // TODO: need to call this only if obj is alive?
   1265         ref_visitor(obj);
   1266         verify_no_from_space_refs_visitor(obj);
   1267       }
   1268     }
   1269   }
   1270   // TODO: LOS. But only refs in LOS are classes.
   1271 }
   1272 
   1273 // The following visitors are used to assert the to-space invariant.
   1274 class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
   1275  public:
   1276   explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
   1277       : collector_(collector) {}
   1278 
   1279   void operator()(mirror::Object* ref) const
   1280       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1281     if (ref == nullptr) {
   1282       // OK.
   1283       return;
   1284     }
   1285     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
   1286   }
   1287 
   1288  private:
   1289   ConcurrentCopying* const collector_;
   1290 };
   1291 
   1292 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
   1293  public:
   1294   explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
   1295       : collector_(collector) {}
   1296 
   1297   void operator()(ObjPtr<mirror::Object> obj,
   1298                   MemberOffset offset,
   1299                   bool is_static ATTRIBUTE_UNUSED) const
   1300       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1301     mirror::Object* ref =
   1302         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
   1303     AssertToSpaceInvariantRefsVisitor visitor(collector_);
   1304     visitor(ref);
   1305   }
   1306   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
   1307       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   1308     CHECK(klass->IsTypeOfReferenceClass());
   1309   }
   1310 
   1311   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
   1312       REQUIRES_SHARED(Locks::mutator_lock_) {
   1313     if (!root->IsNull()) {
   1314       VisitRoot(root);
   1315     }
   1316   }
   1317 
   1318   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
   1319       REQUIRES_SHARED(Locks::mutator_lock_) {
   1320     AssertToSpaceInvariantRefsVisitor visitor(collector_);
   1321     visitor(root->AsMirrorPtr());
   1322   }
   1323 
   1324  private:
   1325   ConcurrentCopying* const collector_;
   1326 };
   1327 
   1328 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
   1329  public:
   1330   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
   1331                                        bool disable_weak_ref_access)
   1332       : concurrent_copying_(concurrent_copying),
   1333         disable_weak_ref_access_(disable_weak_ref_access) {
   1334   }
   1335 
   1336   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
   1337     // Note: self is not necessarily equal to thread since thread may be suspended.
   1338     Thread* self = Thread::Current();
   1339     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
   1340         << thread->GetState() << " thread " << thread << " self " << self;
   1341     // Revoke thread local mark stacks.
   1342     accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
   1343     if (tl_mark_stack != nullptr) {
   1344       MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
   1345       concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
   1346       thread->SetThreadLocalMarkStack(nullptr);
   1347     }
   1348     // Disable weak ref access.
   1349     if (disable_weak_ref_access_) {
   1350       thread->SetWeakRefAccessEnabled(false);
   1351     }
   1352     // If thread is a running mutator, then act on behalf of the garbage collector.
   1353     // See the code in ThreadList::RunCheckpoint.
   1354     concurrent_copying_->GetBarrier().Pass(self);
   1355   }
   1356 
   1357  private:
   1358   ConcurrentCopying* const concurrent_copying_;
   1359   const bool disable_weak_ref_access_;
   1360 };
   1361 
   1362 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
   1363                                                     Closure* checkpoint_callback) {
   1364   Thread* self = Thread::Current();
   1365   RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
   1366   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   1367   gc_barrier_->Init(self, 0);
   1368   size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
    1369   // If there are no threads to wait for, all the checkpoint functions have already finished;
    1370   // there is no need to release the mutator lock.
   1371   if (barrier_count == 0) {
   1372     return;
   1373   }
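           // Release the shared mutator lock while waiting for the checkpointed threads to pass the
           // barrier, then reacquire it.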
   1374   Locks::mutator_lock_->SharedUnlock(self);
   1375   {
   1376     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
   1377     gc_barrier_->Increment(self, barrier_count);
   1378   }
   1379   Locks::mutator_lock_->SharedLock(self);
   1380 }
   1381 
   1382 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
   1383   Thread* self = Thread::Current();
   1384   CHECK_EQ(self, thread);
   1385   accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
   1386   if (tl_mark_stack != nullptr) {
   1387     CHECK(is_marking_);
   1388     MutexLock mu(self, mark_stack_lock_);
   1389     revoked_mark_stacks_.push_back(tl_mark_stack);
   1390     thread->SetThreadLocalMarkStack(nullptr);
   1391   }
   1392 }
   1393 
   1394 void ConcurrentCopying::ProcessMarkStack() {
   1395   if (kVerboseMode) {
   1396     LOG(INFO) << "ProcessMarkStack. ";
   1397   }
   1398   bool empty_prev = false;
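           // Processing the stack may push new entries, so only stop after the mark stack has been
           // observed empty on two consecutive passes.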
   1399   while (true) {
   1400     bool empty = ProcessMarkStackOnce();
   1401     if (empty_prev && empty) {
   1402       // Saw empty mark stack for a second time, done.
   1403       break;
   1404     }
   1405     empty_prev = empty;
   1406   }
   1407 }
   1408 
   1409 bool ConcurrentCopying::ProcessMarkStackOnce() {
   1410   Thread* self = Thread::Current();
   1411   CHECK(thread_running_gc_ != nullptr);
   1412   CHECK(self == thread_running_gc_);
   1413   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1414   size_t count = 0;
   1415   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1416   if (mark_stack_mode == kMarkStackModeThreadLocal) {
   1417     // Process the thread-local mark stacks and the GC mark stack.
   1418     count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
   1419                                           /* checkpoint_callback */ nullptr);
   1420     while (!gc_mark_stack_->IsEmpty()) {
   1421       mirror::Object* to_ref = gc_mark_stack_->PopBack();
   1422       ProcessMarkStackRef(to_ref);
   1423       ++count;
   1424     }
   1425     gc_mark_stack_->Reset();
   1426   } else if (mark_stack_mode == kMarkStackModeShared) {
   1427     // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
   1428     // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
   1429     // disabled at this point.
   1430     IssueEmptyCheckpoint();
   1431     // Process the shared GC mark stack with a lock.
   1432     {
   1433       MutexLock mu(self, mark_stack_lock_);
   1434       CHECK(revoked_mark_stacks_.empty());
   1435     }
   1436     while (true) {
   1437       std::vector<mirror::Object*> refs;
   1438       {
   1439         // Copy refs with lock. Note the number of refs should be small.
   1440         MutexLock mu(self, mark_stack_lock_);
   1441         if (gc_mark_stack_->IsEmpty()) {
   1442           break;
   1443         }
   1444         for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
   1445              p != gc_mark_stack_->End(); ++p) {
   1446           refs.push_back(p->AsMirrorPtr());
   1447         }
   1448         gc_mark_stack_->Reset();
   1449       }
   1450       for (mirror::Object* ref : refs) {
   1451         ProcessMarkStackRef(ref);
   1452         ++count;
   1453       }
   1454     }
   1455   } else {
   1456     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
   1457              static_cast<uint32_t>(kMarkStackModeGcExclusive));
   1458     {
   1459       MutexLock mu(self, mark_stack_lock_);
   1460       CHECK(revoked_mark_stacks_.empty());
   1461     }
   1462     // Process the GC mark stack in the exclusive mode. No need to take the lock.
   1463     while (!gc_mark_stack_->IsEmpty()) {
   1464       mirror::Object* to_ref = gc_mark_stack_->PopBack();
   1465       ProcessMarkStackRef(to_ref);
   1466       ++count;
   1467     }
   1468     gc_mark_stack_->Reset();
   1469   }
   1470 
   1471   // Return true if the stack was empty.
   1472   return count == 0;
   1473 }
   1474 
   1475 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
   1476                                                        Closure* checkpoint_callback) {
   1477   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   1478   RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
   1479   size_t count = 0;
   1480   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
   1481   {
   1482     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1483     // Make a copy of the mark stack vector.
   1484     mark_stacks = revoked_mark_stacks_;
   1485     revoked_mark_stacks_.clear();
   1486   }
   1487   for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
   1488     for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
   1489       mirror::Object* to_ref = p->AsMirrorPtr();
   1490       ProcessMarkStackRef(to_ref);
   1491       ++count;
   1492     }
   1493     {
   1494       MutexLock mu(Thread::Current(), mark_stack_lock_);
   1495       if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
    1496         // The pool already has enough stacks; delete this one.
   1497         delete mark_stack;
   1498       } else {
   1499         // Otherwise, put it into the pool for later reuse.
   1500         mark_stack->Reset();
   1501         pooled_mark_stacks_.push_back(mark_stack);
   1502       }
   1503     }
   1504   }
   1505   return count;
   1506 }
   1507 
   1508 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   1509   DCHECK(!region_space_->IsInFromSpace(to_ref));
   1510   if (kUseBakerReadBarrier) {
   1511     DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
   1512         << " " << to_ref << " " << to_ref->GetReadBarrierState()
   1513         << " is_marked=" << IsMarked(to_ref);
   1514   }
   1515   bool add_to_live_bytes = false;
   1516   if (region_space_->IsInUnevacFromSpace(to_ref)) {
   1517     // Mark the bitmap only in the GC thread here so that we don't need a CAS.
   1518     if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
   1519       // It may be already marked if we accidentally pushed the same object twice due to the racy
   1520       // bitmap read in MarkUnevacFromSpaceRegion.
   1521       Scan(to_ref);
   1522       // Only add to the live bytes if the object was not already marked.
   1523       add_to_live_bytes = true;
   1524     }
   1525   } else {
   1526     Scan(to_ref);
   1527   }
   1528   if (kUseBakerReadBarrier) {
   1529     DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
   1530         << " " << to_ref << " " << to_ref->GetReadBarrierState()
   1531         << " is_marked=" << IsMarked(to_ref);
   1532   }
   1533 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
   1534   mirror::Object* referent = nullptr;
   1535   if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
   1536                 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
   1537                 !IsInToSpace(referent)))) {
   1538     // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
   1539     // will change it to white later in ReferenceQueue::DequeuePendingReference().
   1540     DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
   1541         << "Left unenqueued ref gray " << to_ref;
   1542   } else {
   1543     // We may occasionally leave a reference white in the queue if its referent happens to be
   1544     // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
   1545     // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
   1546     // else block.
   1547     if (kUseBakerReadBarrier) {
   1548       bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
   1549           ReadBarrier::GrayState(),
   1550           ReadBarrier::WhiteState());
   1551       DCHECK(success) << "Must succeed as we won the race.";
   1552     }
   1553   }
   1554 #else
   1555   DCHECK(!kUseBakerReadBarrier);
   1556 #endif
   1557 
   1558   if (add_to_live_bytes) {
    1559     // Add to the live bytes of the unevacuated from-space region containing the object. Note
    1560     // this code is always run by the GC-running thread (no synchronization required).
   1561     DCHECK(region_space_bitmap_->Test(to_ref));
   1562     size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
   1563     size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
   1564     region_space_->AddLiveBytes(to_ref, alloc_size);
   1565   }
   1566   if (ReadBarrier::kEnableToSpaceInvariantChecks) {
   1567     CHECK(to_ref != nullptr);
   1568     space::RegionSpace* region_space = RegionSpace();
   1569     CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
   1570     AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
   1571     AssertToSpaceInvariantFieldVisitor visitor(this);
   1572     to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
   1573         visitor,
   1574         visitor);
   1575   }
   1576 }
   1577 
   1578 class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
   1579  public:
   1580   explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
   1581       : concurrent_copying_(concurrent_copying) {
   1582   }
   1583 
   1584   void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
   1585     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
   1586     // to avoid a deadlock b/31500969.
   1587     CHECK(concurrent_copying_->weak_ref_access_enabled_);
   1588     concurrent_copying_->weak_ref_access_enabled_ = false;
   1589   }
   1590 
   1591  private:
   1592   ConcurrentCopying* const concurrent_copying_;
   1593 };
   1594 
   1595 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
   1596   Thread* self = Thread::Current();
   1597   CHECK(thread_running_gc_ != nullptr);
   1598   CHECK_EQ(self, thread_running_gc_);
   1599   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1600   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1601   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
   1602            static_cast<uint32_t>(kMarkStackModeThreadLocal));
   1603   mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
   1604   DisableWeakRefAccessCallback dwrac(this);
    1605   // Process the thread-local mark stacks one last time after switching to the shared mark stack
    1606   // mode, and disable weak ref access.
   1607   ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac);
   1608   if (kVerboseMode) {
   1609     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   1610   }
   1611 }
   1612 
   1613 void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
   1614   Thread* self = Thread::Current();
   1615   CHECK(thread_running_gc_ != nullptr);
   1616   CHECK_EQ(self, thread_running_gc_);
   1617   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1618   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1619   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
   1620            static_cast<uint32_t>(kMarkStackModeShared));
   1621   mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
   1622   QuasiAtomic::ThreadFenceForConstructor();
   1623   if (kVerboseMode) {
   1624     LOG(INFO) << "Switched to GC exclusive mark stack mode";
   1625   }
   1626 }
   1627 
   1628 void ConcurrentCopying::CheckEmptyMarkStack() {
   1629   Thread* self = Thread::Current();
   1630   CHECK(thread_running_gc_ != nullptr);
   1631   CHECK_EQ(self, thread_running_gc_);
   1632   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1633   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1634   if (mark_stack_mode == kMarkStackModeThreadLocal) {
   1635     // Thread-local mark stack mode.
   1636     RevokeThreadLocalMarkStacks(false, nullptr);
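             // The revocation checkpoint moves any remaining thread-local stacks into
             // revoked_mark_stacks_, which must be empty if the mark stack is really empty.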
   1637     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1638     if (!revoked_mark_stacks_.empty()) {
   1639       for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
   1640         while (!mark_stack->IsEmpty()) {
   1641           mirror::Object* obj = mark_stack->PopBack();
   1642           if (kUseBakerReadBarrier) {
   1643             uint32_t rb_state = obj->GetReadBarrierState();
   1644             LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
   1645                       << rb_state << " is_marked=" << IsMarked(obj);
   1646           } else {
   1647             LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
   1648                       << " is_marked=" << IsMarked(obj);
   1649           }
   1650         }
   1651       }
   1652       LOG(FATAL) << "mark stack is not empty";
   1653     }
   1654   } else {
   1655     // Shared, GC-exclusive, or off.
   1656     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1657     CHECK(gc_mark_stack_->IsEmpty());
   1658     CHECK(revoked_mark_stacks_.empty());
   1659   }
   1660 }
   1661 
   1662 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
   1663   TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
   1664   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1665   Runtime::Current()->SweepSystemWeaks(this);
   1666 }
   1667 
   1668 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
   1669   {
   1670     TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
   1671     accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   1672     if (kEnableFromSpaceAccountingCheck) {
   1673       CHECK_GE(live_stack_freeze_size_, live_stack->Size());
   1674     }
   1675     heap_->MarkAllocStackAsLive(live_stack);
   1676     live_stack->Reset();
   1677   }
   1678   CheckEmptyMarkStack();
   1679   TimingLogger::ScopedTiming split("Sweep", GetTimings());
   1680   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
   1681     if (space->IsContinuousMemMapAllocSpace()) {
   1682       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
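               // Skip the region space (reclaimed via ClearFromSpace in ReclaimPhase) and immune spaces.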
   1683       if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
   1684         continue;
   1685       }
   1686       TimingLogger::ScopedTiming split2(
   1687           alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
   1688       RecordFree(alloc_space->Sweep(swap_bitmaps));
   1689     }
   1690   }
   1691   SweepLargeObjects(swap_bitmaps);
   1692 }
   1693 
   1694 void ConcurrentCopying::MarkZygoteLargeObjects() {
   1695   TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
   1696   Thread* const self = Thread::Current();
   1697   WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
   1698   space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
   1699   if (los != nullptr) {
   1700     // Pick the current live bitmap (mark bitmap if swapped).
   1701     accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
   1702     accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
   1703     // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
   1704     std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
   1705     live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
   1706                                   reinterpret_cast<uintptr_t>(range.second),
   1707                                   [mark_bitmap, los, self](mirror::Object* obj)
   1708         REQUIRES(Locks::heap_bitmap_lock_)
   1709         REQUIRES_SHARED(Locks::mutator_lock_) {
   1710       if (los->IsZygoteLargeObject(self, obj)) {
   1711         mark_bitmap->Set(obj);
   1712       }
   1713     });
   1714   }
   1715 }
   1716 
   1717 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
   1718   TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
   1719   if (heap_->GetLargeObjectsSpace() != nullptr) {
   1720     RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
   1721   }
   1722 }
   1723 
   1724 void ConcurrentCopying::ReclaimPhase() {
   1725   TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
   1726   if (kVerboseMode) {
   1727     LOG(INFO) << "GC ReclaimPhase";
   1728   }
   1729   Thread* self = Thread::Current();
   1730 
   1731   {
   1732     // Double-check that the mark stack is empty.
    1733     // Note: this must be set after VerifyNoFromSpaceReferences().
   1734     is_asserting_to_space_invariant_ = false;
   1735     QuasiAtomic::ThreadFenceForConstructor();
   1736     if (kVerboseMode) {
    1737       LOG(INFO) << "Issue an empty checkpoint.";
   1738     }
   1739     IssueEmptyCheckpoint();
   1740     // Disable the check.
   1741     is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
   1742     if (kUseBakerReadBarrier) {
   1743       updated_all_immune_objects_.StoreSequentiallyConsistent(false);
   1744     }
   1745     CheckEmptyMarkStack();
   1746   }
   1747 
   1748   {
   1749     // Record freed objects.
   1750     TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
   1751     // Don't include thread-locals that are in the to-space.
   1752     const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
   1753     const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
   1754     const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
   1755     const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
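             // bytes_moved_ / objects_moved_ hold the amounts copied into the to-space during this GC.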
   1756     uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
   1757     cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
   1758     uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
   1759     cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
   1760     if (kEnableFromSpaceAccountingCheck) {
   1761       CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
   1762       CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
   1763     }
   1764     CHECK_LE(to_objects, from_objects);
   1765     CHECK_LE(to_bytes, from_bytes);
   1766     // Cleared bytes and objects, populated by the call to RegionSpace::ClearFromSpace below.
   1767     uint64_t cleared_bytes;
   1768     uint64_t cleared_objects;
   1769     {
   1770       TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
   1771       region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
   1772       // `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
   1773       // RegionSpace::ClearFromSpace may clear empty unevac regions.
   1774       CHECK_GE(cleared_bytes, from_bytes);
   1775       CHECK_GE(cleared_objects, from_objects);
   1776     }
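             // The net freed amount is what ClearFromSpace reclaimed minus what was copied into the
             // to-space.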
   1777     int64_t freed_bytes = cleared_bytes - to_bytes;
   1778     int64_t freed_objects = cleared_objects - to_objects;
   1779     if (kVerboseMode) {
   1780       LOG(INFO) << "RecordFree:"
   1781                 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
   1782                 << " unevac_from_bytes=" << unevac_from_bytes
   1783                 << " unevac_from_objects=" << unevac_from_objects
   1784                 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
   1785                 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
   1786                 << " from_space size=" << region_space_->FromSpaceSize()
   1787                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
   1788                 << " to_space size=" << region_space_->ToSpaceSize();
   1789       LOG(INFO) << "(before) num_bytes_allocated="
   1790                 << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
   1791     }
   1792     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
   1793     if (kVerboseMode) {
   1794       LOG(INFO) << "(after) num_bytes_allocated="
   1795                 << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
   1796     }
   1797   }
   1798 
   1799   {
   1800     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1801     Sweep(false);
   1802     SwapBitmaps();
   1803     heap_->UnBindBitmaps();
   1804 
   1805     // The bitmap was cleared at the start of the GC, there is nothing we need to do here.
   1806     DCHECK(region_space_bitmap_ != nullptr);
   1807     region_space_bitmap_ = nullptr;
   1808   }
   1809 
   1810   CheckEmptyMarkStack();
   1811 
   1812   if (kVerboseMode) {
   1813     LOG(INFO) << "GC end of ReclaimPhase";
   1814   }
   1815 }
   1816 
   1817 std::string ConcurrentCopying::DumpReferenceInfo(mirror::Object* ref,
   1818                                                  const char* ref_name,
   1819                                                  std::string indent) {
   1820   std::ostringstream oss;
   1821   oss << indent << heap_->GetVerification()->DumpObjectInfo(ref, ref_name) << '\n';
   1822   if (ref != nullptr) {
   1823     if (kUseBakerReadBarrier) {
   1824       oss << indent << ref_name << "->GetMarkBit()=" << ref->GetMarkBit() << '\n';
   1825       oss << indent << ref_name << "->GetReadBarrierState()=" << ref->GetReadBarrierState() << '\n';
   1826     }
   1827   }
   1828   if (region_space_->HasAddress(ref)) {
   1829     oss << indent << "Region containing " << ref_name << ":" << '\n';
   1830     region_space_->DumpRegionForObject(oss, ref);
   1831     if (region_space_bitmap_ != nullptr) {
   1832       oss << indent << "region_space_bitmap_->Test(" << ref_name << ")="
   1833           << std::boolalpha << region_space_bitmap_->Test(ref) << std::noboolalpha;
   1834     }
   1835   }
   1836   return oss.str();
   1837 }
   1838 
   1839 std::string ConcurrentCopying::DumpHeapReference(mirror::Object* obj,
   1840                                                  MemberOffset offset,
   1841                                                  mirror::Object* ref) {
   1842   std::ostringstream oss;
   1843   std::string indent = "  ";
   1844   oss << indent << "Invalid reference: ref=" << ref
   1845       << " referenced from: object=" << obj << " offset= " << offset << '\n';
   1846   // Information about `obj`.
   1847   oss << DumpReferenceInfo(obj, "obj", indent) << '\n';
   1848   // Information about `ref`.
   1849   oss << DumpReferenceInfo(ref, "ref", indent);
   1850   return oss.str();
   1851 }
   1852 
   1853 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
   1854                                                MemberOffset offset,
   1855                                                mirror::Object* ref) {
   1856   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   1857   if (is_asserting_to_space_invariant_) {
   1858     if (region_space_->HasAddress(ref)) {
   1859       // Check to-space invariant in region space (moving space).
   1860       using RegionType = space::RegionSpace::RegionType;
   1861       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
   1862       if (type == RegionType::kRegionTypeToSpace) {
   1863         // OK.
   1864         return;
   1865       } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
   1866         if (!IsMarkedInUnevacFromSpace(ref)) {
   1867           LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
   1868           LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
   1869         }
   1870         CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
    1871       } else {
   1872         // Not OK: either a from-space ref or a reference in an unused region.
   1873         // Do extra logging.
   1874         if (type == RegionType::kRegionTypeFromSpace) {
   1875           LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
   1876         } else {
   1877           LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
   1878         }
   1879         LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(obj, offset, ref);
   1880         if (obj != nullptr) {
   1881           LogFromSpaceRefHolder(obj, offset);
   1882         }
   1883         ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
   1884         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
   1885         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
   1886         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
   1887         MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
   1888         LOG(FATAL) << "Invalid reference " << ref
   1889                    << " referenced from object " << obj << " at offset " << offset;
   1890       }
   1891     } else {
   1892       // Check to-space invariant in non-moving space.
   1893       AssertToSpaceInvariantInNonMovingSpace(obj, ref);
   1894     }
   1895   }
   1896 }
   1897 
   1898 class RootPrinter {
   1899  public:
   1900   RootPrinter() { }
   1901 
   1902   template <class MirrorType>
   1903   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
   1904       REQUIRES_SHARED(Locks::mutator_lock_) {
   1905     if (!root->IsNull()) {
   1906       VisitRoot(root);
   1907     }
   1908   }
   1909 
   1910   template <class MirrorType>
   1911   void VisitRoot(mirror::Object** root)
   1912       REQUIRES_SHARED(Locks::mutator_lock_) {
   1913     LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
   1914   }
   1915 
   1916   template <class MirrorType>
   1917   void VisitRoot(mirror::CompressedReference<MirrorType>* root)
   1918       REQUIRES_SHARED(Locks::mutator_lock_) {
   1919     LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
   1920   }
   1921 };
   1922 
   1923 std::string ConcurrentCopying::DumpGcRoot(mirror::Object* ref) {
   1924   std::ostringstream oss;
   1925   std::string indent = "  ";
   1926   oss << indent << "Invalid GC root: ref=" << ref << '\n';
   1927   // Information about `ref`.
   1928   oss << DumpReferenceInfo(ref, "ref", indent);
   1929   return oss.str();
   1930 }
   1931 
   1932 void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
   1933                                                mirror::Object* ref) {
   1934   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   1935   if (is_asserting_to_space_invariant_) {
   1936     if (region_space_->HasAddress(ref)) {
   1937       // Check to-space invariant in region space (moving space).
   1938       using RegionType = space::RegionSpace::RegionType;
   1939       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
   1940       if (type == RegionType::kRegionTypeToSpace) {
   1941         // OK.
   1942         return;
   1943       } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
   1944         if (!IsMarkedInUnevacFromSpace(ref)) {
   1945           LOG(FATAL_WITHOUT_ABORT) << "Found unmarked reference in unevac from-space:";
   1946           LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
   1947         }
   1948         CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
   1949       } else {
   1950         // Not OK: either a from-space ref or a reference in an unused region.
   1951         // Do extra logging.
   1952         if (type == RegionType::kRegionTypeFromSpace) {
   1953           LOG(FATAL_WITHOUT_ABORT) << "Found from-space reference:";
   1954         } else {
   1955           LOG(FATAL_WITHOUT_ABORT) << "Found reference in region with type " << type << ":";
   1956         }
   1957         LOG(FATAL_WITHOUT_ABORT) << DumpGcRoot(ref);
   1958         if (gc_root_source == nullptr) {
   1959           // No info.
   1960         } else if (gc_root_source->HasArtField()) {
   1961           ArtField* field = gc_root_source->GetArtField();
   1962           LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " "
   1963                                    << ArtField::PrettyField(field);
   1964           RootPrinter root_printer;
   1965           field->VisitRoots(root_printer);
   1966         } else if (gc_root_source->HasArtMethod()) {
   1967           ArtMethod* method = gc_root_source->GetArtMethod();
   1968           LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " "
   1969                                    << ArtMethod::PrettyMethod(method);
   1970           RootPrinter root_printer;
   1971           method->VisitRoots(root_printer, kRuntimePointerSize);
   1972         }
   1973         ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
   1974         LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:";
   1975         region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
   1976         PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
   1977         MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
   1978         LOG(FATAL) << "Invalid reference " << ref;
   1979       }
   1980     } else {
   1981       // Check to-space invariant in non-moving space.
   1982       AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref);
   1983     }
   1984   }
   1985 }
   1986 
   1987 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
   1988   if (kUseBakerReadBarrier) {
   1989     LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
   1990               << " holder rb_state=" << obj->GetReadBarrierState();
   1991   } else {
   1992     LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
   1993   }
   1994   if (region_space_->IsInFromSpace(obj)) {
   1995     LOG(INFO) << "holder is in the from-space.";
   1996   } else if (region_space_->IsInToSpace(obj)) {
   1997     LOG(INFO) << "holder is in the to-space.";
   1998   } else if (region_space_->IsInUnevacFromSpace(obj)) {
   1999     LOG(INFO) << "holder is in the unevac from-space.";
   2000     if (IsMarkedInUnevacFromSpace(obj)) {
   2001       LOG(INFO) << "holder is marked in the region space bitmap.";
   2002     } else {
   2003       LOG(INFO) << "holder is not marked in the region space bitmap.";
   2004     }
   2005   } else {
   2006     // In a non-moving space.
   2007     if (immune_spaces_.ContainsObject(obj)) {
   2008       LOG(INFO) << "holder is in an immune image or the zygote space.";
   2009     } else {
   2010       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
   2011       accounting::ContinuousSpaceBitmap* mark_bitmap =
   2012           heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
   2013       accounting::LargeObjectBitmap* los_bitmap =
   2014           heap_mark_bitmap_->GetLargeObjectBitmap(obj);
   2015       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
   2016       bool is_los = mark_bitmap == nullptr;
   2017       if (!is_los && mark_bitmap->Test(obj)) {
   2018         LOG(INFO) << "holder is marked in the mark bit map.";
   2019       } else if (is_los && los_bitmap->Test(obj)) {
   2020         LOG(INFO) << "holder is marked in the los bit map.";
   2021       } else {
   2022         // If ref is on the allocation stack, then it is considered
    2023         // marked/alive (but not necessarily on the live stack).
   2024         if (IsOnAllocStack(obj)) {
   2025           LOG(INFO) << "holder is on the alloc stack.";
   2026         } else {
   2027           LOG(INFO) << "holder is not marked or on the alloc stack.";
   2028         }
   2029       }
   2030     }
   2031   }
   2032   LOG(INFO) << "offset=" << offset.SizeValue();
   2033 }
   2034 
   2035 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
   2036                                                                mirror::Object* ref) {
   2037   CHECK(!region_space_->HasAddress(ref)) << "obj=" << obj << " ref=" << ref;
   2038   // In a non-moving space. Check that the ref is marked.
   2039   if (immune_spaces_.ContainsObject(ref)) {
   2040     if (kUseBakerReadBarrier) {
   2041       // Immune object may not be gray if called from the GC.
   2042       if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
   2043         return;
   2044       }
   2045       bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
   2046       CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
   2047           << "Unmarked immune space ref. obj=" << obj << " rb_state="
   2048           << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
   2049           << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
   2050           << " updated_all_immune_objects=" << updated_all_immune_objects;
   2051     }
   2052   } else {
   2053     accounting::ContinuousSpaceBitmap* mark_bitmap =
   2054         heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
   2055     accounting::LargeObjectBitmap* los_bitmap =
   2056         heap_mark_bitmap_->GetLargeObjectBitmap(ref);
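             // If no continuous space bitmap covers the ref, it must be in the large object space.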
   2057     bool is_los = mark_bitmap == nullptr;
   2058     if ((!is_los && mark_bitmap->Test(ref)) ||
   2059         (is_los && los_bitmap->Test(ref))) {
   2060       // OK.
   2061     } else {
    2062       // If `ref` is on the allocation stack, then it may not be marked live in the
    2063       // bitmaps, but it is still considered marked/alive (though not necessarily on
    2064       // the live stack).
   2065       CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
   2066                                  << " obj=" << obj
   2067                                  << " ref=" << ref
   2068                                  << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
   2069     }
   2070   }
   2071 }
   2072 
   2073 // Used to scan ref fields of an object.
   2074 class ConcurrentCopying::RefFieldsVisitor {
   2075  public:
   2076   explicit RefFieldsVisitor(ConcurrentCopying* collector)
   2077       : collector_(collector) {}
   2078 
   2079   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
   2080       const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
   2081       REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
   2082     collector_->Process(obj, offset);
   2083   }
   2084 
   2085   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
   2086       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
   2087     CHECK(klass->IsTypeOfReferenceClass());
   2088     collector_->DelayReferenceReferent(klass, ref);
   2089   }
   2090 
   2091   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
   2092       ALWAYS_INLINE
   2093       REQUIRES_SHARED(Locks::mutator_lock_) {
   2094     if (!root->IsNull()) {
   2095       VisitRoot(root);
   2096     }
   2097   }
   2098 
   2099   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
   2100       ALWAYS_INLINE
   2101       REQUIRES_SHARED(Locks::mutator_lock_) {
   2102     collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
   2103   }
   2104 
   2105  private:
   2106   ConcurrentCopying* const collector_;
   2107 };
   2108 
   2109 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   2110   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
    2111     // Avoid all read barriers while visiting references to help performance.
    2112     // Don't do this in transaction mode because we may read the old value of a field which may
    2113     // trigger read barriers.
   2114     Thread::Current()->ModifyDebugDisallowReadBarrier(1);
   2115   }
   2116   DCHECK(!region_space_->IsInFromSpace(to_ref));
   2117   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   2118   RefFieldsVisitor visitor(this);
    2119   // Disable the read barrier for performance reasons.
   2120   to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
   2121       visitor, visitor);
   2122   if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
   2123     Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
   2124   }
   2125 }
   2126 
   2127 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
   2128   DCHECK_EQ(Thread::Current(), thread_running_gc_);
   2129   mirror::Object* ref = obj->GetFieldObject<
   2130       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
   2131   mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(
   2132       ref,
   2133       /*holder*/ obj,
   2134       offset);
   2135   if (to_ref == ref) {
   2136     return;
   2137   }
   2138   // This may fail if the mutator writes to the field at the same time. But it's ok.
   2139   mirror::Object* expected_ref = ref;
   2140   mirror::Object* new_ref = to_ref;
   2141   do {
   2142     if (expected_ref !=
   2143         obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
   2144       // It was updated by the mutator.
   2145       break;
   2146     }
   2147     // Use release CAS to make sure threads reading the reference see contents of copied objects.
   2148   } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
   2149       offset,
   2150       expected_ref,
   2151       new_ref));
   2152 }
   2153 
   2154 // Process some roots.
   2155 inline void ConcurrentCopying::VisitRoots(
   2156     mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
   2157   for (size_t i = 0; i < count; ++i) {
   2158     mirror::Object** root = roots[i];
   2159     mirror::Object* ref = *root;
   2160     mirror::Object* to_ref = Mark(ref);
   2161     if (to_ref == ref) {
   2162       continue;
   2163     }
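             // Install the to-space reference into the root slot with a weak CAS; give up if a mutator
             // has already stored a different reference there.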
   2164     Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
   2165     mirror::Object* expected_ref = ref;
   2166     mirror::Object* new_ref = to_ref;
   2167     do {
   2168       if (expected_ref != addr->LoadRelaxed()) {
   2169         // It was updated by the mutator.
   2170         break;
   2171       }
   2172     } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
   2173   }
   2174 }
   2175 
   2176 template<bool kGrayImmuneObject>
   2177 inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
   2178   DCHECK(!root->IsNull());
   2179   mirror::Object* const ref = root->AsMirrorPtr();
   2180   mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
   2181   if (to_ref != ref) {
   2182     auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
   2183     auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
   2184     auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
   2185     // If the cas fails, then it was updated by the mutator.
   2186     do {
   2187       if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
   2188         // It was updated by the mutator.
   2189         break;
   2190       }
   2191     } while (!addr->CompareAndSetWeakRelaxed(expected_ref, new_ref));
   2192   }
   2193 }
   2194 
   2195 inline void ConcurrentCopying::VisitRoots(
   2196     mirror::CompressedReference<mirror::Object>** roots, size_t count,
   2197     const RootInfo& info ATTRIBUTE_UNUSED) {
   2198   for (size_t i = 0; i < count; ++i) {
   2199     mirror::CompressedReference<mirror::Object>* const root = roots[i];
   2200     if (!root->IsNull()) {
   2201       // kGrayImmuneObject is true because this is used for the thread flip.
   2202       MarkRoot</*kGrayImmuneObject*/true>(root);
   2203     }
   2204   }
   2205 }
   2206 
    2207 // Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC.
   2208 class ConcurrentCopying::ScopedGcGraysImmuneObjects {
   2209  public:
   2210   explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
   2211       : collector_(collector), enabled_(false) {
   2212     if (kUseBakerReadBarrier &&
   2213         collector_->thread_running_gc_ == Thread::Current() &&
   2214         !collector_->gc_grays_immune_objects_) {
   2215       collector_->gc_grays_immune_objects_ = true;
   2216       enabled_ = true;
   2217     }
   2218   }
   2219 
   2220   ~ScopedGcGraysImmuneObjects() {
   2221     if (kUseBakerReadBarrier &&
   2222         collector_->thread_running_gc_ == Thread::Current() &&
   2223         enabled_) {
   2224       DCHECK(collector_->gc_grays_immune_objects_);
   2225       collector_->gc_grays_immune_objects_ = false;
   2226     }
   2227   }
   2228 
   2229  private:
   2230   ConcurrentCopying* const collector_;
   2231   bool enabled_;
   2232 };
   2233 
    2234 // Fill the given memory block with a dummy object. Used to fill in blocks allocated for object
    2235 // copies that were lost in a race.
   2236 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
   2237   // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
    2238   // barriers here because we need the updated reference to the int array class, etc. Temporarily
    2239   // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
   2240   ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
   2241   CHECK_ALIGNED(byte_size, kObjectAlignment);
   2242   memset(dummy_obj, 0, byte_size);
    2243   // Avoid going through read barriers since kDisallowReadBarrierDuringScan may be enabled.
   2244   // Explicitly mark to make sure to get an object in the to-space.
   2245   mirror::Class* int_array_class = down_cast<mirror::Class*>(
   2246       Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
   2247   CHECK(int_array_class != nullptr);
   2248   if (ReadBarrier::kEnableToSpaceInvariantChecks) {
   2249     AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
   2250   }
   2251   size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
   2252   CHECK_EQ(component_size, sizeof(int32_t));
   2253   size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
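           // Choose the filler: use java.lang.Object when the block is too small to hold even an empty
           // int array; otherwise use an int array whose length is sized to cover the rest of the block.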
   2254   if (data_offset > byte_size) {
   2255     // An int array is too big. Use java.lang.Object.
   2256     CHECK(java_lang_Object_ != nullptr);
   2257     if (ReadBarrier::kEnableToSpaceInvariantChecks) {
   2258       AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
   2259     }
   2260     CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
   2261     dummy_obj->SetClass(java_lang_Object_);
   2262     CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
   2263   } else {
   2264     // Use an int array.
   2265     dummy_obj->SetClass(int_array_class);
   2266     CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
   2267     int32_t length = (byte_size - data_offset) / component_size;
   2268     mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
   2269     dummy_arr->SetLength(length);
   2270     CHECK_EQ(dummy_arr->GetLength(), length)
   2271         << "byte_size=" << byte_size << " length=" << length
   2272         << " component_size=" << component_size << " data_offset=" << data_offset;
   2273     CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
   2274         << "byte_size=" << byte_size << " length=" << length
   2275         << " component_size=" << component_size << " data_offset=" << data_offset;
   2276   }
   2277 }
   2278 
    2279 // Reuse the memory blocks that held copies of objects that were lost in a race.
   2280 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
   2281   // Try to reuse the blocks that were unused due to CAS failures.
   2282   CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
   2283   Thread* self = Thread::Current();
   2284   size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
   2285   size_t byte_size;
   2286   uint8_t* addr;
   2287   {
   2288     MutexLock mu(self, skipped_blocks_lock_);
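             // skipped_blocks_map_ is ordered by block size, so lower_bound finds the smallest block
             // that can hold alloc_size.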
   2289     auto it = skipped_blocks_map_.lower_bound(alloc_size);
   2290     if (it == skipped_blocks_map_.end()) {
   2291       // Not found.
   2292       return nullptr;
   2293     }
   2294     byte_size = it->first;
   2295     CHECK_GE(byte_size, alloc_size);
   2296     if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
   2297       // If remainder would be too small for a dummy object, retry with a larger request size.
   2298       it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
   2299       if (it == skipped_blocks_map_.end()) {
   2300         // Not found.
   2301         return nullptr;
   2302       }
   2303       CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
   2304       CHECK_GE(it->first - alloc_size, min_object_size)
   2305           << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
   2306     }
   2307     // Found a block.
   2308     CHECK(it != skipped_blocks_map_.end());
   2309     byte_size = it->first;
   2310     addr = it->second;
   2311     CHECK_GE(byte_size, alloc_size);
   2312     CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
   2313     CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
   2314     if (kVerboseMode) {
   2315       LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
   2316     }
   2317     skipped_blocks_map_.erase(it);
   2318   }
   2319   memset(addr, 0, byte_size);
   2320   if (byte_size > alloc_size) {
   2321     // Return the remainder to the map.
   2322     CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
   2323     CHECK_GE(byte_size - alloc_size, min_object_size);
   2324     // FillWithDummyObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
   2325     // violation and possible deadlock. The deadlock case is a recursive case:
   2326     // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
   2327     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
   2328                         byte_size - alloc_size);
   2329     CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
   2330     {
   2331       MutexLock mu(self, skipped_blocks_lock_);
   2332       skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
   2333     }
   2334   }
   2335   return reinterpret_cast<mirror::Object*>(addr);
   2336 }
   2337 
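         // Copy from_ref into the to-space and install a forwarding pointer in its lock word.
         // Allocation falls back from the region space to the skipped-blocks map and finally to the
         // non-moving space. If another thread installs a forwarding pointer first, the local copy is
         // turned into a dummy object and recycled (or freed), and the winner's copy is returned
         // instead.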
   2338 mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
   2339                                         mirror::Object* holder,
   2340                                         MemberOffset offset) {
   2341   DCHECK(region_space_->IsInFromSpace(from_ref));
   2342   // If the class pointer is null, the object is invalid. This could occur for a dangling pointer
   2343   // from a previous GC that is either inside or outside the allocated region.
   2344   mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
   2345   if (UNLIKELY(klass == nullptr)) {
   2346     heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
   2347   }
   2348   // There must not be a read barrier to avoid nested RB that might violate the to-space invariant.
   2349   // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
   2350   // objects, but it's ok and necessary.
   2351   size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
   2352   size_t region_space_alloc_size = (obj_size <= space::RegionSpace::kRegionSize)
   2353       ? RoundUp(obj_size, space::RegionSpace::kAlignment)
   2354       : RoundUp(obj_size, space::RegionSpace::kRegionSize);
   2355   size_t region_space_bytes_allocated = 0U;
   2356   size_t non_moving_space_bytes_allocated = 0U;
   2357   size_t bytes_allocated = 0U;
   2358   size_t dummy;
   2359   bool fall_back_to_non_moving = false;
   2360   mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
   2361       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
   2362   bytes_allocated = region_space_bytes_allocated;
   2363   if (LIKELY(to_ref != nullptr)) {
   2364     DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
   2365   } else {
   2366     // Failed to allocate in the region space. Try the skipped blocks.
   2367     to_ref = AllocateInSkippedBlock(region_space_alloc_size);
   2368     if (to_ref != nullptr) {
    2369       // Succeeded in allocating from a skipped block.
   2370       if (heap_->use_tlab_) {
    2371         // This is necessary for the TLAB case, as it is not accounted for in the space.
   2372         region_space_->RecordAlloc(to_ref);
   2373       }
   2374       bytes_allocated = region_space_alloc_size;
   2375       heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
   2376       to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
   2377       to_space_objects_skipped_.fetch_sub(1, std::memory_order_seq_cst);
   2378     } else {
   2379       // Fall back to the non-moving space.
   2380       fall_back_to_non_moving = true;
   2381       if (kVerboseMode) {
   2382         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
   2383                   << to_space_bytes_skipped_.LoadSequentiallyConsistent()
   2384                   << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
   2385       }
   2386       to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
   2387                                                &non_moving_space_bytes_allocated, nullptr, &dummy);
   2388       if (UNLIKELY(to_ref == nullptr)) {
   2389         LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
   2390                                  << obj_size << " byte object in region type "
   2391                                  << region_space_->GetRegionType(from_ref);
   2392         LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
   2393       }
   2394       bytes_allocated = non_moving_space_bytes_allocated;
   2395       // Mark it in the mark bitmap.
   2396       accounting::ContinuousSpaceBitmap* mark_bitmap =
   2397           heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
   2398       CHECK(mark_bitmap != nullptr);
   2399       CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
   2400     }
   2401   }
   2402   DCHECK(to_ref != nullptr);
   2403 
   2404   // Copy the object excluding the lock word since that is handled in the loop.
   2405   to_ref->SetClass(klass);
   2406   const size_t kObjectHeaderSize = sizeof(mirror::Object);
   2407   DCHECK_GE(obj_size, kObjectHeaderSize);
   2408   static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
   2409                     sizeof(LockWord),
   2410                 "Object header size does not match");
    2411   // Memcpy can tear words since it may copy byte by byte. This is only safe because the
    2412   // object in the from-space is immutable other than the lock word. b/31423258
   2413   memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
   2414          reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
   2415          obj_size - kObjectHeaderSize);
   2416 
   2417   // Attempt to install the forward pointer. This is in a loop as the
   2418   // lock word atomic write can fail.
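           // Each iteration: read the lock word; if it already holds a forwarding address we lost the
           // race and recycle our copy; otherwise publish the copy (lock word, gray state), fence, and
           // then try to CAS the forwarding address into from_ref's lock word.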
   2419   while (true) {
   2420     LockWord old_lock_word = from_ref->GetLockWord(false);
   2421 
   2422     if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
   2423       // Lost the race. Another thread (either GC or mutator) stored
   2424       // the forwarding pointer first. Make the lost copy (to_ref)
   2425       // look like a valid but dead (dummy) object and keep it for
   2426       // future reuse.
   2427       FillWithDummyObject(to_ref, bytes_allocated);
   2428       if (!fall_back_to_non_moving) {
   2429         DCHECK(region_space_->IsInToSpace(to_ref));
   2430         if (bytes_allocated > space::RegionSpace::kRegionSize) {
   2431           // Free the large alloc.
   2432           region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
   2433         } else {
   2434           // Record the lost copy for later reuse.
   2435           heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
   2436           to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
   2437           to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
   2438           MutexLock mu(Thread::Current(), skipped_blocks_lock_);
   2439           skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
   2440                                                     reinterpret_cast<uint8_t*>(to_ref)));
   2441         }
   2442       } else {
   2443         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
   2444         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
   2445         // Free the non-moving-space chunk.
   2446         accounting::ContinuousSpaceBitmap* mark_bitmap =
   2447             heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
   2448         CHECK(mark_bitmap != nullptr);
   2449         CHECK(mark_bitmap->Clear(to_ref));
   2450         heap_->non_moving_space_->Free(Thread::Current(), to_ref);
   2451       }
   2452 
   2453       // Get the winner's forward ptr.
   2454       mirror::Object* lost_fwd_ptr = to_ref;
   2455       to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
   2456       CHECK(to_ref != nullptr);
   2457       CHECK_NE(to_ref, lost_fwd_ptr);
   2458       CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
   2459           << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
   2460       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
   2461       return to_ref;
   2462     }
   2463 
   2464     // Copy the old lock word over since we did not copy it yet.
   2465     to_ref->SetLockWord(old_lock_word, false);
   2466     // Set the gray ptr.
   2467     if (kUseBakerReadBarrier) {
   2468       to_ref->SetReadBarrierState(ReadBarrier::GrayState());
   2469     }
   2470 
   2471     // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
   2472     // before the object copy.
   2473     QuasiAtomic::ThreadFenceRelease();
   2474 
   2475     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
   2476 
   2477     // Try to atomically write the fwd ptr.
   2478     bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
   2479     if (LIKELY(success)) {
   2480       // The CAS succeeded.
   2481       objects_moved_.FetchAndAddRelaxed(1);
   2482       bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size);
   2483       if (LIKELY(!fall_back_to_non_moving)) {
   2484         DCHECK(region_space_->IsInToSpace(to_ref));
   2485       } else {
   2486         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
   2487         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
   2488       }
   2489       if (kUseBakerReadBarrier) {
   2490         DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
   2491       }
   2492       DCHECK(GetFwdPtr(from_ref) == to_ref);
   2493       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
   2494       PushOntoMarkStack(to_ref);
   2495       return to_ref;
   2496     } else {
    2497       // The CAS failed. We may have lost the race, or the CAS may have failed due to
    2498       // concurrent monitor/hashcode ops. Either way, retry.
   2499     }
   2500   }
   2501 }
   2502 
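         // Return the marked address of from_ref (the to-space copy for from-space refs, from_ref
         // itself otherwise), or nullptr if it is not marked. Dispatches on the region type: to-space
         // and immune objects are always considered marked, from-space refs use the forwarding
         // pointer, and unevac from-space, non-moving and large objects use the mark bitmaps or the
         // allocation stack.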
   2503 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
   2504   DCHECK(from_ref != nullptr);
   2505   space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
   2506   if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
   2507     // It's already marked.
   2508     return from_ref;
   2509   }
   2510   mirror::Object* to_ref;
   2511   if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
   2512     to_ref = GetFwdPtr(from_ref);
   2513     DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
   2514            heap_->non_moving_space_->HasAddress(to_ref))
   2515         << "from_ref=" << from_ref << " to_ref=" << to_ref;
   2516   } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
   2517     if (IsMarkedInUnevacFromSpace(from_ref)) {
   2518       to_ref = from_ref;
   2519     } else {
   2520       to_ref = nullptr;
   2521     }
   2522   } else {
   2523     // At this point, `from_ref` should not be in the region space
   2524     // (i.e. within an "unused" region).
   2525     DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
   2526     // from_ref is in a non-moving space.
   2527     if (immune_spaces_.ContainsObject(from_ref)) {
   2528       // An immune object is alive.
   2529       to_ref = from_ref;
   2530     } else {
   2531       // Non-immune non-moving space. Use the mark bitmap.
   2532       accounting::ContinuousSpaceBitmap* mark_bitmap =
   2533           heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
   2534       bool is_los = mark_bitmap == nullptr;
   2535       if (!is_los && mark_bitmap->Test(from_ref)) {
   2536         // Already marked.
   2537         to_ref = from_ref;
   2538       } else {
   2539         accounting::LargeObjectBitmap* los_bitmap =
   2540             heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
    2541         // We may not have a large object space for dex2oat; don't assume it exists.
   2542         if (los_bitmap == nullptr) {
   2543           CHECK(heap_->GetLargeObjectsSpace() == nullptr)
   2544               << "LOS bitmap covers the entire address range " << from_ref
   2545               << " " << heap_->DumpSpaces();
   2546         }
   2547         if (los_bitmap != nullptr && is_los && los_bitmap->Test(from_ref)) {
   2548           // Already marked in LOS.
   2549           to_ref = from_ref;
   2550         } else {
   2551           // Not marked.
   2552           if (IsOnAllocStack(from_ref)) {
   2553             // If on the allocation stack, it's considered marked.
   2554             to_ref = from_ref;
   2555           } else {
   2556             // Not marked.
   2557             to_ref = nullptr;
   2558           }
   2559         }
   2560       }
   2561     }
   2562   }
   2563   return to_ref;
   2564 }
   2565 
   2566 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
   2567   // TODO: Explain why this is here. What release operation does it pair with?
   2568   QuasiAtomic::ThreadFenceAcquire();
   2569   accounting::ObjectStack* alloc_stack = GetAllocationStack();
   2570   return alloc_stack->Contains(ref);
   2571 }
   2572 
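         // Mark an object that lives outside the region space (non-moving space or large object
         // space). Marking uses the corresponding mark bitmap; with the Baker read barrier the object
         // is also flipped white -> gray before being pushed on the mark stack, while objects still on
         // the allocation stack are treated as live without being marked.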
   2573 mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref,
   2574                                                  mirror::Object* holder,
   2575                                                  MemberOffset offset) {
   2576   // ref is in a non-moving space (from_ref == to_ref).
   2577   DCHECK(!region_space_->HasAddress(ref)) << ref;
   2578   DCHECK(!immune_spaces_.ContainsObject(ref));
   2579   // Use the mark bitmap.
   2580   accounting::ContinuousSpaceBitmap* mark_bitmap =
   2581       heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
   2582   accounting::LargeObjectBitmap* los_bitmap =
   2583       heap_mark_bitmap_->GetLargeObjectBitmap(ref);
   2584   bool is_los = mark_bitmap == nullptr;
   2585   if (!is_los && mark_bitmap->Test(ref)) {
   2586     // Already marked.
   2587     if (kUseBakerReadBarrier) {
   2588       DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
   2589              ref->GetReadBarrierState() == ReadBarrier::WhiteState());
   2590     }
   2591   } else if (is_los && los_bitmap->Test(ref)) {
   2592     // Already marked in LOS.
   2593     if (kUseBakerReadBarrier) {
   2594       DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
   2595              ref->GetReadBarrierState() == ReadBarrier::WhiteState());
   2596     }
   2597   } else {
   2598     // Not marked.
   2599     if (IsOnAllocStack(ref)) {
   2600       // If it's on the allocation stack, it's considered marked. Keep it white.
   2601       // Objects on the allocation stack need not be marked.
   2602       if (!is_los) {
   2603         DCHECK(!mark_bitmap->Test(ref));
   2604       } else {
   2605         DCHECK(!los_bitmap->Test(ref));
   2606       }
   2607       if (kUseBakerReadBarrier) {
   2608         DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
   2609       }
   2610     } else {
   2611       // For the baker-style RB, we need to handle 'false-gray' cases. See the
   2612       // kRegionTypeUnevacFromSpace-case comment in Mark().
   2613       if (kUseBakerReadBarrier) {
   2614         // Test the bitmap first to reduce the chance of false gray cases.
   2615         if ((!is_los && mark_bitmap->Test(ref)) ||
   2616             (is_los && los_bitmap->Test(ref))) {
   2617           return ref;
   2618         }
   2619       }
   2620       if (is_los && !IsAligned<kPageSize>(ref)) {
    2621         // Ref is a large object that is not aligned; it must be heap corruption. Dump data
    2622         // before AtomicSetReadBarrierState, which would fault if the address is not valid.
   2623         heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
   2624       }
    2625       // Neither marked nor on the allocation stack. Try to mark it.
    2626       // This may or may not succeed, which is ok.
   2627       bool cas_success = false;
   2628       if (kUseBakerReadBarrier) {
   2629         cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
   2630                                                      ReadBarrier::GrayState());
   2631       }
   2632       if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
   2633         // Already marked.
   2634         if (kUseBakerReadBarrier && cas_success &&
   2635             ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
   2636           PushOntoFalseGrayStack(ref);
   2637         }
   2638       } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
   2639         // Already marked in LOS.
   2640         if (kUseBakerReadBarrier && cas_success &&
   2641             ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
   2642           PushOntoFalseGrayStack(ref);
   2643         }
   2644       } else {
   2645         // Newly marked.
   2646         if (kUseBakerReadBarrier) {
   2647           DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
   2648         }
   2649         PushOntoMarkStack(ref);
   2650       }
   2651     }
   2652   }
   2653   return ref;
   2654 }
   2655 
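         // Post-GC cleanup: check that all pooled mark stacks were returned, clear the region space
         // cards (unless kVerifyNoMissingCardMarks needs them), drop the skipped-blocks map, clear
         // marked objects, filter mod-union table cards, empty the read barrier mark bit stack, and
         // fold the read barrier slow path measurements into the cumulative statistics.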
   2656 void ConcurrentCopying::FinishPhase() {
   2657   Thread* const self = Thread::Current();
   2658   {
   2659     MutexLock mu(self, mark_stack_lock_);
   2660     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   2661   }
   2662   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
   2663   // positives.
   2664   if (!kVerifyNoMissingCardMarks) {
   2665     TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
    2666     // We do not currently use the region space cards at all; madvise them away to save RAM.
   2667     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
   2668   }
   2669   {
   2670     MutexLock mu(self, skipped_blocks_lock_);
   2671     skipped_blocks_map_.clear();
   2672   }
   2673   {
   2674     ReaderMutexLock mu(self, *Locks::mutator_lock_);
   2675     {
   2676       WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
   2677       heap_->ClearMarkedObjects();
   2678     }
   2679     if (kUseBakerReadBarrier && kFilterModUnionCards) {
   2680       TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
   2681       ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
   2682       for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
   2683         DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
   2684         accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
   2685         // Filter out cards that don't need to be set.
   2686         if (table != nullptr) {
   2687           table->FilterCards();
   2688         }
   2689       }
   2690     }
   2691     if (kUseBakerReadBarrier) {
   2692       TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
   2693       DCHECK(rb_mark_bit_stack_ != nullptr);
   2694       const auto* limit = rb_mark_bit_stack_->End();
   2695       for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
   2696         CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0))
   2697             << "rb_mark_bit_stack_->Begin()" << rb_mark_bit_stack_->Begin() << '\n'
   2698             << "rb_mark_bit_stack_->End()" << rb_mark_bit_stack_->End() << '\n'
   2699             << "rb_mark_bit_stack_->IsFull()"
   2700             << std::boolalpha << rb_mark_bit_stack_->IsFull() << std::noboolalpha << '\n'
   2701             << DumpReferenceInfo(it->AsMirrorPtr(), "*it");
   2702       }
   2703       rb_mark_bit_stack_->Reset();
   2704     }
   2705   }
   2706   if (measure_read_barrier_slow_path_) {
   2707     MutexLock mu(self, rb_slow_path_histogram_lock_);
   2708     rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
   2709     rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
   2710     rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
   2711   }
   2712 }
   2713 
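         // Used during reference processing: returns true if the referent field is null or already
         // marked, updating the field to the to-space address when the referent has moved. With
         // do_atomic_update the update uses a weak CAS, since a mutator may write the field
         // concurrently.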
   2714 bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
   2715                                                     bool do_atomic_update) {
   2716   mirror::Object* from_ref = field->AsMirrorPtr();
   2717   if (from_ref == nullptr) {
   2718     return true;
   2719   }
   2720   mirror::Object* to_ref = IsMarked(from_ref);
   2721   if (to_ref == nullptr) {
   2722     return false;
   2723   }
   2724   if (from_ref != to_ref) {
   2725     if (do_atomic_update) {
   2726       do {
   2727         if (field->AsMirrorPtr() != from_ref) {
   2728           // Concurrently overwritten by a mutator.
   2729           break;
   2730         }
   2731       } while (!field->CasWeakRelaxed(from_ref, to_ref));
   2732     } else {
   2733       // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering.
   2734       field->Assign</* kIsVolatile */ true>(to_ref);
   2735     }
   2736   }
   2737   return true;
   2738 }
   2739 
   2740 mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
   2741   return Mark(from_ref);
   2742 }
   2743 
   2744 void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
   2745                                                ObjPtr<mirror::Reference> reference) {
   2746   heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
   2747 }
   2748 
   2749 void ConcurrentCopying::ProcessReferences(Thread* self) {
   2750   TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
    2751   // We don't really need to take the heap bitmap lock as we use CAS to mark in bitmaps.
   2752   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   2753   GetHeap()->GetReferenceProcessor()->ProcessReferences(
   2754       true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
   2755 }
   2756 
   2757 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
   2758   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   2759   region_space_->RevokeAllThreadLocalBuffers();
   2760 }
   2761 
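         // Read barrier slow path entry point used when measurement is enabled: counts slow-path
         // invocations (separately for the GC thread and mutators) and, if timing is enabled,
         // accumulates the nanoseconds spent in Mark().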
   2762 mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
   2763   if (Thread::Current() != thread_running_gc_) {
   2764     rb_slow_path_count_.FetchAndAddRelaxed(1u);
   2765   } else {
   2766     rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
   2767   }
   2768   ScopedTrace tr(__FUNCTION__);
   2769   const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
   2770   mirror::Object* ret = Mark(from_ref);
   2771   if (measure_read_barrier_slow_path_) {
   2772     rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
   2773   }
   2774   return ret;
   2775 }
   2776 
   2777 void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
   2778   GarbageCollector::DumpPerformanceInfo(os);
   2779   MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
   2780   if (rb_slow_path_time_histogram_.SampleSize() > 0) {
   2781     Histogram<uint64_t>::CumulativeData cumulative_data;
   2782     rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
   2783     rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
   2784   }
   2785   if (rb_slow_path_count_total_ > 0) {
   2786     os << "Slow path count " << rb_slow_path_count_total_ << "\n";
   2787   }
   2788   if (rb_slow_path_count_gc_total_ > 0) {
   2789     os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
   2790   }
    2791   os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
    2792   os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
   2793 
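           // The capacity printed below is GetNumRegions() / 2: at most half of the region space can
           // hold live data, presumably because the other half is needed as the evacuation target.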
   2794   os << "Peak regions allocated "
   2795      << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
   2796      << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
   2797      << ") / " << region_space_->GetNumRegions() / 2 << " ("
   2798      << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
   2799      << ")\n";
   2800 }
   2801 
   2802 }  // namespace collector
   2803 }  // namespace gc
   2804 }  // namespace art
   2805