      1 /*
      2  * Copyright (C) 2014 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "concurrent_copying.h"
     18 
     19 #include "art_field-inl.h"
     20 #include "base/stl_util.h"
     21 #include "debugger.h"
     22 #include "gc/accounting/heap_bitmap-inl.h"
     23 #include "gc/accounting/space_bitmap-inl.h"
     24 #include "gc/reference_processor.h"
     25 #include "gc/space/image_space.h"
     26 #include "gc/space/space-inl.h"
     27 #include "image-inl.h"
     28 #include "intern_table.h"
     29 #include "mirror/class-inl.h"
     30 #include "mirror/object-inl.h"
     31 #include "scoped_thread_state_change.h"
     32 #include "thread-inl.h"
     33 #include "thread_list.h"
     34 #include "well_known_classes.h"
     35 
     36 namespace art {
     37 namespace gc {
     38 namespace collector {
     39 
     40 static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
     41 
     42 ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
     43     : GarbageCollector(heap,
     44                        name_prefix + (name_prefix.empty() ? "" : " ") +
     45                        "concurrent copying + mark sweep"),
     46       region_space_(nullptr), gc_barrier_(new Barrier(0)),
     47       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
     48                                                      kDefaultGcMarkStackSize,
     49                                                      kDefaultGcMarkStackSize)),
     50       mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
     51       thread_running_gc_(nullptr),
     52       is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
     53       heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
     54       weak_ref_access_enabled_(true),
     55       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
     56       rb_table_(heap_->GetReadBarrierTable()),
     57       force_evacuate_all_(false) {
     58   static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
     59                 "The region space size and the read barrier table region size must match");
     60   cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
     61   Thread* self = Thread::Current();
     62   {
     63     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     64     // Cache this so that we won't have to lock heap_bitmap_lock_ in
     65     // Mark(), which could otherwise cause a nested lock on
     66     // heap_bitmap_lock_ when a GC-triggered read barrier runs during GC,
     67     // or a lock order violation (class_linker_lock_ and heap_bitmap_lock_).
     68     heap_mark_bitmap_ = heap->GetMarkBitmap();
     69   }
     70   {
     71     MutexLock mu(self, mark_stack_lock_);
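            // Pre-fill the pool of mark stacks that are handed out as thread-local mark stacks
            // in PushOntoMarkStack().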
     72     for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
     73       accounting::AtomicStack<mirror::Object>* mark_stack =
     74           accounting::AtomicStack<mirror::Object>::Create(
     75               "thread local mark stack", kMarkStackSize, kMarkStackSize);
     76       pooled_mark_stacks_.push_back(mark_stack);
     77     }
     78   }
     79 }
     80 
     81 void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
     82   // Used for preserving soft references. It should be OK not to use a CAS here since there should
     83   // be no other threads that can trigger read barriers on the same referent during reference
     84   // processing.
     85   from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
     86   DCHECK(!from_ref->IsNull());
     87 }
     88 
     89 ConcurrentCopying::~ConcurrentCopying() {
     90   STLDeleteElements(&pooled_mark_stacks_);
     91 }
     92 
     93 void ConcurrentCopying::RunPhases() {
     94   CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
     95   CHECK(!is_active_);
     96   is_active_ = true;
     97   Thread* self = Thread::Current();
     98   thread_running_gc_ = self;
     99   Locks::mutator_lock_->AssertNotHeld(self);
    100   {
    101     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    102     InitializePhase();
    103   }
    104   FlipThreadRoots();
    105   {
    106     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    107     MarkingPhase();
    108   }
    109   // Verify no from space refs. This causes a pause.
    110   if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    111     TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    112     ScopedPause pause(this);
    113     CheckEmptyMarkStack();
    114     if (kVerboseMode) {
    115       LOG(INFO) << "Verifying no from-space refs";
    116     }
    117     VerifyNoFromSpaceReferences();
    118     if (kVerboseMode) {
    119       LOG(INFO) << "Done verifying no from-space refs";
    120     }
    121     CheckEmptyMarkStack();
    122   }
    123   {
    124     ReaderMutexLock mu(self, *Locks::mutator_lock_);
    125     ReclaimPhase();
    126   }
    127   FinishPhase();
    128   CHECK(is_active_);
    129   is_active_ = false;
    130   thread_running_gc_ = nullptr;
    131 }
    132 
    133 void ConcurrentCopying::BindBitmaps() {
    134   Thread* self = Thread::Current();
    135   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    136   // Mark all of the spaces we never collect as immune.
    137   for (const auto& space : heap_->GetContinuousSpaces()) {
    138     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
    139         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
    140       CHECK(space->IsZygoteSpace() || space->IsImageSpace());
    141       immune_spaces_.AddSpace(space);
    142       const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
    143           "cc zygote space bitmap";
    144       // TODO: try avoiding using bitmaps for image/zygote to save space.
    145       accounting::ContinuousSpaceBitmap* bitmap =
    146           accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
    147       cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
    148       cc_bitmaps_.push_back(bitmap);
    149     } else if (space == region_space_) {
    150       accounting::ContinuousSpaceBitmap* bitmap =
    151           accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
    152                                                     space->Begin(), space->Capacity());
    153       cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
    154       cc_bitmaps_.push_back(bitmap);
    155       region_space_bitmap_ = bitmap;
    156     }
    157   }
    158 }
    159 
    160 void ConcurrentCopying::InitializePhase() {
    161   TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
    162   if (kVerboseMode) {
    163     LOG(INFO) << "GC InitializePhase";
    164     LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
    165               << reinterpret_cast<void*>(region_space_->Limit());
    166   }
    167   CheckEmptyMarkStack();
    168   immune_spaces_.Reset();
    169   bytes_moved_.StoreRelaxed(0);
    170   objects_moved_.StoreRelaxed(0);
    171   if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
    172       GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
    173       GetCurrentIteration()->GetClearSoftReferences()) {
    174     force_evacuate_all_ = true;
    175   } else {
    176     force_evacuate_all_ = false;
    177   }
    178   BindBitmaps();
    179   if (kVerboseMode) {
    180     LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    181     LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
    182               << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    183     for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    184       LOG(INFO) << "Immune space: " << *space;
    185     }
    186     LOG(INFO) << "GC end of InitializePhase";
    187   }
    188 }
    189 
    190 // Used to switch the thread roots of a thread from from-space refs to to-space refs.
    191 class ConcurrentCopying::ThreadFlipVisitor : public Closure {
    192  public:
    193   ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
    194       : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
    195   }
    196 
    197   virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    198     // Note: self is not necessarily equal to thread since thread may be suspended.
    199     Thread* self = Thread::Current();
    200     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    201         << thread->GetState() << " thread " << thread << " self " << self;
    202     thread->SetIsGcMarking(true);
    203     if (use_tlab_ && thread->HasTlab()) {
    204       if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
    205         // This must come before the revoke.
    206         size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
    207         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
    208         reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
    209             FetchAndAddSequentiallyConsistent(thread_local_objects);
    210       } else {
    211         concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
    212       }
    213     }
    214     if (kUseThreadLocalAllocationStack) {
    215       thread->RevokeThreadLocalAllocationStack();
    216     }
    217     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    218     thread->VisitRoots(concurrent_copying_);
    219     concurrent_copying_->GetBarrier().Pass(self);
    220   }
    221 
    222  private:
    223   ConcurrentCopying* const concurrent_copying_;
    224   const bool use_tlab_;
    225 };
    226 
    227 // Called back from Runtime::FlipThreadRoots() during a pause.
    228 class ConcurrentCopying::FlipCallback : public Closure {
    229  public:
    230   explicit FlipCallback(ConcurrentCopying* concurrent_copying)
    231       : concurrent_copying_(concurrent_copying) {
    232   }
    233 
    234   virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    235     ConcurrentCopying* cc = concurrent_copying_;
    236     TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    237     // Note: self is not necessarily equal to thread since thread may be suspended.
    238     Thread* self = Thread::Current();
    239     CHECK(thread == self);
    240     Locks::mutator_lock_->AssertExclusiveHeld(self);
    241     cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    242     cc->SwapStacks();
    243     if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
    244       cc->RecordLiveStackFreezeSize(self);
    245       cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
    246       cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    247     }
    248     cc->is_marking_ = true;
    249     cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    250     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
    251       CHECK(Runtime::Current()->IsAotCompiler());
    252       TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
    253       Runtime::Current()->VisitTransactionRoots(cc);
    254     }
    255   }
    256 
    257  private:
    258   ConcurrentCopying* const concurrent_copying_;
    259 };
    260 
    261 // Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
    262 void ConcurrentCopying::FlipThreadRoots() {
    263   TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
    264   if (kVerboseMode) {
    265     LOG(INFO) << "time=" << region_space_->Time();
    266     region_space_->DumpNonFreeRegions(LOG(INFO));
    267   }
    268   Thread* self = Thread::Current();
    269   Locks::mutator_lock_->AssertNotHeld(self);
    270   gc_barrier_->Init(self, 0);
    271   ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
    272   FlipCallback flip_callback(this);
    273   heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
    274   size_t barrier_count = Runtime::Current()->FlipThreadRoots(
    275       &thread_flip_visitor, &flip_callback, this);
    276   heap_->ThreadFlipEnd(self);
    277   {
    278     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    279     gc_barrier_->Increment(self, barrier_count);
    280   }
    281   is_asserting_to_space_invariant_ = true;
    282   QuasiAtomic::ThreadFenceForConstructor();
    283   if (kVerboseMode) {
    284     LOG(INFO) << "time=" << region_space_->Time();
    285     region_space_->DumpNonFreeRegions(LOG(INFO));
    286     LOG(INFO) << "GC end of FlipThreadRoots";
    287   }
    288 }
    289 
    290 void ConcurrentCopying::SwapStacks() {
    291   heap_->SwapStacks();
    292 }
    293 
    294 void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
    295   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    296   live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    297 }
    298 
    299 // Used to visit objects in the immune spaces.
    300 class ConcurrentCopying::ImmuneSpaceObjVisitor {
    301  public:
    302   explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
    303 
    304   void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
    305       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    306     DCHECK(obj != nullptr);
    307     DCHECK(collector_->immune_spaces_.ContainsObject(obj));
    308     accounting::ContinuousSpaceBitmap* cc_bitmap =
    309         collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    310     DCHECK(cc_bitmap != nullptr)
    311         << "An immune space object must have a bitmap";
    312     if (kIsDebugBuild) {
    313       DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
    314           << "Immune space object must be already marked";
    315     }
    316     // This may or may not succeed, which is ok.
    317     if (kUseBakerReadBarrier) {
    318       obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    319     }
    320     if (cc_bitmap->AtomicTestAndSet(obj)) {
    321       // Already marked. Do nothing.
    322     } else {
    323       // Newly marked. Set the gray bit and push it onto the mark stack.
    324       CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
    325       collector_->PushOntoMarkStack(obj);
    326     }
    327   }
    328 
    329  private:
    330   ConcurrentCopying* const collector_;
    331 };
    332 
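        // A checkpoint closure whose only action is to pass the GC barrier; used by
        // IssueEmptyCheckpoint() to wait until every running mutator has executed a checkpoint.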
    333 class EmptyCheckpoint : public Closure {
    334  public:
    335   explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
    336       : concurrent_copying_(concurrent_copying) {
    337   }
    338 
    339   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    340     // Note: self is not necessarily equal to thread since thread may be suspended.
    341     Thread* self = Thread::Current();
    342     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    343         << thread->GetState() << " thread " << thread << " self " << self;
    344     // If thread is a running mutator, then act on behalf of the garbage collector.
    345     // See the code in ThreadList::RunCheckpoint.
    346     concurrent_copying_->GetBarrier().Pass(self);
    347   }
    348 
    349  private:
    350   ConcurrentCopying* const concurrent_copying_;
    351 };
    352 
    353 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
    354 void ConcurrentCopying::MarkingPhase() {
    355   TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
    356   if (kVerboseMode) {
    357     LOG(INFO) << "GC MarkingPhase";
    358   }
    359   CHECK(weak_ref_access_enabled_);
    360   {
    361     // Mark the image root. The WB-based collectors do not need to
    362     // scan the image objects from the roots because they rely on the card table,
    363     // but it's necessary for the RB to-space invariant to hold.
    364     TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    365     for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    366       if (space->IsImageSpace()) {
    367         gc::space::ImageSpace* image = space->AsImageSpace();
    368         if (image != nullptr) {
    369           mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
    370           mirror::Object* marked_image_root = Mark(image_root);
    371           CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
    372           if (ReadBarrier::kEnableToSpaceInvariantChecks) {
    373             AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
    374           }
    375         }
    376       }
    377     }
    378   }
    379   {
    380     TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    381     Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
    382   }
    383   {
    384     // TODO: don't visit the transaction roots if it's not active.
    385     TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    386     Runtime::Current()->VisitNonThreadRoots(this);
    387   }
    388 
    389   // Immune spaces.
    390   for (auto& space : immune_spaces_.GetSpaces()) {
    391     DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    392     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    393     ImmuneSpaceObjVisitor visitor(this);
    394     live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
    395                                   reinterpret_cast<uintptr_t>(space->Limit()),
    396                                   visitor);
    397   }
    398 
    399   Thread* self = Thread::Current();
    400   {
    401     TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    402     // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    403     // primary reasons are that we need a checkpoint to process thread-local mark stacks, but after
    404     // we disable weak ref access we can no longer use a checkpoint, because running threads may be
    405     // blocked in WaitHoldingLocks and cause a deadlock, and that once we reach the point where we
    406     // process weak references, we can avoid taking a lock when accessing the GC mark stack, which
    407     // makes mark stack processing more efficient.
    408 
    409     // Process the mark stack once in the thread local stack mode. This marks most of the live
    410     // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system
    411     // weaks) that may happen concurrently while we are processing the mark stack and that newly
    412     // mark/gray objects and push refs onto the mark stack.
    413     ProcessMarkStack();
    414     // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    415     // for the last time before transitioning to the shared mark stack mode, which would process new
    416     // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    417     // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    418     // important to do these together in a single checkpoint so that we can ensure that mutators
    419     // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    420     // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    421     // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    422     // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
    423     SwitchToSharedMarkStackMode();
    424     CHECK(!self->GetWeakRefAccessEnabled());
    425     // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    426     // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    427     // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    428     // (via read barriers) have no way to produce any more refs to process. Marking thus converges
    429     // before we process weak refs below.
    430     ProcessMarkStack();
    431     CheckEmptyMarkStack();
    432     // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    433     // lock from this point on.
    434     SwitchToGcExclusiveMarkStackMode();
    435     CheckEmptyMarkStack();
    436     if (kVerboseMode) {
    437       LOG(INFO) << "ProcessReferences";
    438     }
    439     // Process weak references. This may produce new refs to process and have them processed via
    440     // ProcessMarkStack (in the GC exclusive mark stack mode).
    441     ProcessReferences(self);
    442     CheckEmptyMarkStack();
    443     if (kVerboseMode) {
    444       LOG(INFO) << "SweepSystemWeaks";
    445     }
    446     SweepSystemWeaks(self);
    447     if (kVerboseMode) {
    448       LOG(INFO) << "SweepSystemWeaks done";
    449     }
    450     // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    451     // marked some objects (kept some strings alive) since hash_set::Erase() can call the hash
    452     // function for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    453     ProcessMarkStack();
    454     CheckEmptyMarkStack();
    455     // Re-enable weak ref accesses.
    456     ReenableWeakRefAccess(self);
    457     // Free data for class loaders that we unloaded.
    458     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    459     // Marking is done. Disable marking.
    460     DisableMarking();
    461     CheckEmptyMarkStack();
    462   }
    463 
    464   CHECK(weak_ref_access_enabled_);
    465   if (kVerboseMode) {
    466     LOG(INFO) << "GC end of MarkingPhase";
    467   }
    468 }
    469 
    470 void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
    471   if (kVerboseMode) {
    472     LOG(INFO) << "ReenableWeakRefAccess";
    473   }
    474   weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
    475   QuasiAtomic::ThreadFenceForConstructor();
    476   // Iterate over all threads (we neither need to nor can use a checkpoint) and re-enable weak ref access.
    477   {
    478     MutexLock mu(self, *Locks::thread_list_lock_);
    479     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    480     for (Thread* thread : thread_list) {
    481       thread->SetWeakRefAccessEnabled(true);
    482     }
    483   }
    484   // Unblock blocking threads.
    485   GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
    486   Runtime::Current()->BroadcastForNewSystemWeaks();
    487 }
    488 
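        // Checkpoint closure that clears each thread's thread-local is_gc_marking flag (see DisableMarking()).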
    489 class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
    490  public:
    491   explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
    492       : concurrent_copying_(concurrent_copying) {
    493   }
    494 
    495   void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    496     // Note: self is not necessarily equal to thread since thread may be suspended.
    497     Thread* self = Thread::Current();
    498     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    499         << thread->GetState() << " thread " << thread << " self " << self;
    500     // Disable the thread-local is_gc_marking flag.
    501     // Note a thread that has just started right before this checkpoint may already have this flag
    502     // set to false, which is ok.
    503     thread->SetIsGcMarking(false);
    504     // If thread is a running mutator, then act on behalf of the garbage collector.
    505     // See the code in ThreadList::RunCheckpoint.
    506     concurrent_copying_->GetBarrier().Pass(self);
    507   }
    508 
    509  private:
    510   ConcurrentCopying* const concurrent_copying_;
    511 };
    512 
    513 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
    514   Thread* self = Thread::Current();
    515   DisableMarkingCheckpoint check_point(this);
    516   ThreadList* thread_list = Runtime::Current()->GetThreadList();
    517   gc_barrier_->Init(self, 0);
    518   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
    519   // If there are no threads to wait for, which implies that all the checkpoint functions have
    520   // finished, then there is no need to release the mutator lock.
    521   if (barrier_count == 0) {
    522     return;
    523   }
    524   // Release locks then wait for all mutator threads to pass the barrier.
    525   Locks::mutator_lock_->SharedUnlock(self);
    526   {
    527     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    528     gc_barrier_->Increment(self, barrier_count);
    529   }
    530   Locks::mutator_lock_->SharedLock(self);
    531 }
    532 
    533 void ConcurrentCopying::DisableMarking() {
    534   // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
    535   // thread-local flags so that a new thread starting up will get the correct is_marking flag.
    536   is_marking_ = false;
    537   QuasiAtomic::ThreadFenceForConstructor();
    538   // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
    539   // still in the middle of a read barrier which may have a from-space ref cached in a local
    540   // variable.
    541   IssueDisableMarkingCheckpoint();
    542   if (kUseTableLookupReadBarrier) {
    543     heap_->rb_table_->ClearAll();
    544     DCHECK(heap_->rb_table_->IsAllCleared());
    545   }
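          // From this point on, no thread may push onto the GC mark stack; PushOntoMarkStack() CHECKs
          // this flag.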
    546   is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
    547   mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
    548 }
    549 
    550 void ConcurrentCopying::IssueEmptyCheckpoint() {
    551   Thread* self = Thread::Current();
    552   EmptyCheckpoint check_point(this);
    553   ThreadList* thread_list = Runtime::Current()->GetThreadList();
    554   gc_barrier_->Init(self, 0);
    555   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
    556   // If there are no threads to wait for, which implies that all the checkpoint functions have
    557   // finished, then there is no need to release the mutator lock.
    558   if (barrier_count == 0) {
    559     return;
    560   }
    561   // Release locks then wait for all mutator threads to pass the barrier.
    562   Locks::mutator_lock_->SharedUnlock(self);
    563   {
    564     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    565     gc_barrier_->Increment(self, barrier_count);
    566   }
    567   Locks::mutator_lock_->SharedLock(self);
    568 }
    569 
    570 void ConcurrentCopying::ExpandGcMarkStack() {
    571   DCHECK(gc_mark_stack_->IsFull());
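          // Double the capacity. The existing entries are copied out first and re-pushed afterwards
          // because Resize() does not preserve the stack contents.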
    572   const size_t new_size = gc_mark_stack_->Capacity() * 2;
    573   std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
    574                                                    gc_mark_stack_->End());
    575   gc_mark_stack_->Resize(new_size);
    576   for (auto& ref : temp) {
    577     gc_mark_stack_->PushBack(ref.AsMirrorPtr());
    578   }
    579   DCHECK(!gc_mark_stack_->IsFull());
    580 }
    581 
    582 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
    583   CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
    584       << " " << to_ref << " " << PrettyTypeOf(to_ref);
    585   Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
    586   CHECK(thread_running_gc_ != nullptr);
    587   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
    588   if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    589     if (LIKELY(self == thread_running_gc_)) {
    590       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
    591       CHECK(self->GetThreadLocalMarkStack() == nullptr);
    592       if (UNLIKELY(gc_mark_stack_->IsFull())) {
    593         ExpandGcMarkStack();
    594       }
    595       gc_mark_stack_->PushBack(to_ref);
    596     } else {
    597       // Otherwise, use a thread-local mark stack.
    598       accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
    599       if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
    600         MutexLock mu(self, mark_stack_lock_);
    601         // Get a new thread local mark stack.
    602         accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
    603         if (!pooled_mark_stacks_.empty()) {
    604           // Use a pooled mark stack.
    605           new_tl_mark_stack = pooled_mark_stacks_.back();
    606           pooled_mark_stacks_.pop_back();
    607         } else {
    608           // None pooled. Create a new one.
    609           new_tl_mark_stack =
    610               accounting::AtomicStack<mirror::Object>::Create(
    611                   "thread local mark stack", 4 * KB, 4 * KB);
    612         }
    613         DCHECK(new_tl_mark_stack != nullptr);
    614         DCHECK(new_tl_mark_stack->IsEmpty());
    615         new_tl_mark_stack->PushBack(to_ref);
    616         self->SetThreadLocalMarkStack(new_tl_mark_stack);
    617         if (tl_mark_stack != nullptr) {
    618           // Store the old full stack into a vector.
    619           revoked_mark_stacks_.push_back(tl_mark_stack);
    620         }
    621       } else {
    622         tl_mark_stack->PushBack(to_ref);
    623       }
    624     }
    625   } else if (mark_stack_mode == kMarkStackModeShared) {
    626     // Access the shared GC mark stack with a lock.
    627     MutexLock mu(self, mark_stack_lock_);
    628     if (UNLIKELY(gc_mark_stack_->IsFull())) {
    629       ExpandGcMarkStack();
    630     }
    631     gc_mark_stack_->PushBack(to_ref);
    632   } else {
    633     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
    634              static_cast<uint32_t>(kMarkStackModeGcExclusive))
    635         << "ref=" << to_ref
    636         << " self->gc_marking=" << self->GetIsGcMarking()
    637         << " cc->is_marking=" << is_marking_;
    638     CHECK(self == thread_running_gc_)
    639         << "Only GC-running thread should access the mark stack "
    640         << "in the GC exclusive mark stack mode";
    641     // Access the GC mark stack without a lock.
    642     if (UNLIKELY(gc_mark_stack_->IsFull())) {
    643       ExpandGcMarkStack();
    644     }
    645     gc_mark_stack_->PushBack(to_ref);
    646   }
    647 }
    648 
    649 accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
    650   return heap_->allocation_stack_.get();
    651 }
    652 
    653 accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
    654   return heap_->live_stack_.get();
    655 }
    656 
    657 // The following visitors are used to verify that there are no
    658 // references to the from-space left after marking.
    659 class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
    660  public:
    661   explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
    662       : collector_(collector) {}
    663 
    664   void operator()(mirror::Object* ref) const
    665       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    666     if (ref == nullptr) {
    667       // OK.
    668       return;
    669     }
    670     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    671     if (kUseBakerReadBarrier) {
    672       if (collector_->RegionSpace()->IsInToSpace(ref)) {
    673         CHECK(ref->GetReadBarrierPointer() == nullptr)
    674             << "To-space ref " << ref << " " << PrettyTypeOf(ref)
    675             << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
    676       } else {
    677         CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
    678               (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
    679                collector_->IsOnAllocStack(ref)))
    680             << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
    681             << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
    682             << " but isn't on the alloc stack (and has white rb_ptr)."
    683             << " Is it in the non-moving space="
    684             << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
    685       }
    686     }
    687   }
    688 
    689   void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
    690       OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    691     DCHECK(root != nullptr);
    692     operator()(root);
    693   }
    694 
    695  private:
    696   ConcurrentCopying* const collector_;
    697 };
    698 
    699 class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
    700  public:
    701   explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
    702       : collector_(collector) {}
    703 
    704   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
    705       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    706     mirror::Object* ref =
    707         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    708     VerifyNoFromSpaceRefsVisitor visitor(collector_);
    709     visitor(ref);
    710   }
    711   void operator()(mirror::Class* klass, mirror::Reference* ref) const
    712       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    713     CHECK(klass->IsTypeOfReferenceClass());
    714     this->operator()(ref, mirror::Reference::ReferentOffset(), false);
    715   }
    716 
    717   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
    718       SHARED_REQUIRES(Locks::mutator_lock_) {
    719     if (!root->IsNull()) {
    720       VisitRoot(root);
    721     }
    722   }
    723 
    724   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
    725       SHARED_REQUIRES(Locks::mutator_lock_) {
    726     VerifyNoFromSpaceRefsVisitor visitor(collector_);
    727     visitor(root->AsMirrorPtr());
    728   }
    729 
    730  private:
    731   ConcurrentCopying* const collector_;
    732 };
    733 
    734 class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
    735  public:
    736   explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
    737       : collector_(collector) {}
    738   void operator()(mirror::Object* obj) const
    739       SHARED_REQUIRES(Locks::mutator_lock_) {
    740     ObjectCallback(obj, collector_);
    741   }
    742   static void ObjectCallback(mirror::Object* obj, void *arg)
    743       SHARED_REQUIRES(Locks::mutator_lock_) {
    744     CHECK(obj != nullptr);
    745     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    746     space::RegionSpace* region_space = collector->RegionSpace();
    747     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    748     VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    749     obj->VisitReferences(visitor, visitor);
    750     if (kUseBakerReadBarrier) {
    751       if (collector->RegionSpace()->IsInToSpace(obj)) {
    752         CHECK(obj->GetReadBarrierPointer() == nullptr)
    753             << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
    754       } else {
    755         CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
    756               (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
    757                collector->IsOnAllocStack(obj)))
    758             << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
    759             << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
    760             << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
    761             << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
    762       }
    763     }
    764   }
    765 
    766  private:
    767   ConcurrentCopying* const collector_;
    768 };
    769 
    770 // Verify there's no from-space references left after the marking phase.
    771 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
    772   Thread* self = Thread::Current();
    773   DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
    774   // Verify that all threads have is_gc_marking set to false.
    775   {
    776     MutexLock mu(self, *Locks::thread_list_lock_);
    777     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    778     for (Thread* thread : thread_list) {
    779       CHECK(!thread->GetIsGcMarking());
    780     }
    781   }
    782   VerifyNoFromSpaceRefsObjectVisitor visitor(this);
    783   // Roots.
    784   {
    785     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    786     VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    787     Runtime::Current()->VisitRoots(&ref_visitor);
    788   }
    789   // The to-space.
    790   region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
    791   // Non-moving spaces.
    792   {
    793     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    794     heap_->GetMarkBitmap()->Visit(visitor);
    795   }
    796   // The alloc stack.
    797   {
    798     VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    799     for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
    800         it < end; ++it) {
    801       mirror::Object* const obj = it->AsMirrorPtr();
    802       if (obj != nullptr && obj->GetClass() != nullptr) {
    803         // TODO: need to call this only if obj is alive?
    804         ref_visitor(obj);
    805         visitor(obj);
    806       }
    807     }
    808   }
    809   // TODO: LOS. But only refs in LOS are classes.
    810 }
    811 
    812 // The following visitors are used to assert the to-space invariant.
    813 class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
    814  public:
    815   explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
    816       : collector_(collector) {}
    817 
    818   void operator()(mirror::Object* ref) const
    819       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    820     if (ref == nullptr) {
    821       // OK.
    822       return;
    823     }
    824     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    825   }
    826 
    827  private:
    828   ConcurrentCopying* const collector_;
    829 };
    830 
    831 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
    832  public:
    833   explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
    834       : collector_(collector) {}
    835 
    836   void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
    837       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    838     mirror::Object* ref =
    839         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    840     AssertToSpaceInvariantRefsVisitor visitor(collector_);
    841     visitor(ref);
    842   }
    843   void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
    844       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    845     CHECK(klass->IsTypeOfReferenceClass());
    846   }
    847 
    848   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
    849       SHARED_REQUIRES(Locks::mutator_lock_) {
    850     if (!root->IsNull()) {
    851       VisitRoot(root);
    852     }
    853   }
    854 
    855   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
    856       SHARED_REQUIRES(Locks::mutator_lock_) {
    857     AssertToSpaceInvariantRefsVisitor visitor(collector_);
    858     visitor(root->AsMirrorPtr());
    859   }
    860 
    861  private:
    862   ConcurrentCopying* const collector_;
    863 };
    864 
    865 class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
    866  public:
    867   explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
    868       : collector_(collector) {}
    869   void operator()(mirror::Object* obj) const
    870       SHARED_REQUIRES(Locks::mutator_lock_) {
    871     ObjectCallback(obj, collector_);
    872   }
    873   static void ObjectCallback(mirror::Object* obj, void *arg)
    874       SHARED_REQUIRES(Locks::mutator_lock_) {
    875     CHECK(obj != nullptr);
    876     ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    877     space::RegionSpace* region_space = collector->RegionSpace();
    878     CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    879     collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    880     AssertToSpaceInvariantFieldVisitor visitor(collector);
    881     obj->VisitReferences(visitor, visitor);
    882   }
    883 
    884  private:
    885   ConcurrentCopying* const collector_;
    886 };
    887 
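        // Checkpoint closure that revokes each thread's thread-local mark stack and optionally disables
        // its weak ref access.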
    888 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
    889  public:
    890   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
    891                                        bool disable_weak_ref_access)
    892       : concurrent_copying_(concurrent_copying),
    893         disable_weak_ref_access_(disable_weak_ref_access) {
    894   }
    895 
    896   virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    897     // Note: self is not necessarily equal to thread since thread may be suspended.
    898     Thread* self = Thread::Current();
    899     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
    900         << thread->GetState() << " thread " << thread << " self " << self;
    901     // Revoke thread local mark stacks.
    902     accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    903     if (tl_mark_stack != nullptr) {
    904       MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
    905       concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
    906       thread->SetThreadLocalMarkStack(nullptr);
    907     }
    908     // Disable weak ref access.
    909     if (disable_weak_ref_access_) {
    910       thread->SetWeakRefAccessEnabled(false);
    911     }
    912     // If thread is a running mutator, then act on behalf of the garbage collector.
    913     // See the code in ThreadList::RunCheckpoint.
    914     concurrent_copying_->GetBarrier().Pass(self);
    915   }
    916 
    917  private:
    918   ConcurrentCopying* const concurrent_copying_;
    919   const bool disable_weak_ref_access_;
    920 };
    921 
    922 void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
    923   Thread* self = Thread::Current();
    924   RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
    925   ThreadList* thread_list = Runtime::Current()->GetThreadList();
    926   gc_barrier_->Init(self, 0);
    927   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
    928   // If there are no threads to wait for, which implies that all the checkpoint functions have
    929   // finished, then there is no need to release the mutator lock.
    930   if (barrier_count == 0) {
    931     return;
    932   }
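          // Release locks then wait for all mutator threads to pass the barrier.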
    933   Locks::mutator_lock_->SharedUnlock(self);
    934   {
    935     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    936     gc_barrier_->Increment(self, barrier_count);
    937   }
    938   Locks::mutator_lock_->SharedLock(self);
    939 }
    940 
    941 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
    942   Thread* self = Thread::Current();
    943   CHECK_EQ(self, thread);
    944   accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    945   if (tl_mark_stack != nullptr) {
    946     CHECK(is_marking_);
    947     MutexLock mu(self, mark_stack_lock_);
    948     revoked_mark_stacks_.push_back(tl_mark_stack);
    949     thread->SetThreadLocalMarkStack(nullptr);
    950   }
    951 }
    952 
    953 void ConcurrentCopying::ProcessMarkStack() {
    954   if (kVerboseMode) {
    955     LOG(INFO) << "ProcessMarkStack. ";
    956   }
    957   bool empty_prev = false;
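          // Keep draining until the mark stack is observed empty on two consecutive passes; a single
          // empty pass is not enough since new refs may be pushed concurrently in the thread-local and
          // shared modes.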
    958   while (true) {
    959     bool empty = ProcessMarkStackOnce();
    960     if (empty_prev && empty) {
    961       // Saw empty mark stack for a second time, done.
    962       break;
    963     }
    964     empty_prev = empty;
    965   }
    966 }
    967 
    968 bool ConcurrentCopying::ProcessMarkStackOnce() {
    969   Thread* self = Thread::Current();
    970   CHECK(thread_running_gc_ != nullptr);
    971   CHECK(self == thread_running_gc_);
    972   CHECK(self->GetThreadLocalMarkStack() == nullptr);
    973   size_t count = 0;
    974   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
    975   if (mark_stack_mode == kMarkStackModeThreadLocal) {
    976     // Process the thread-local mark stacks and the GC mark stack.
    977     count += ProcessThreadLocalMarkStacks(false);
    978     while (!gc_mark_stack_->IsEmpty()) {
    979       mirror::Object* to_ref = gc_mark_stack_->PopBack();
    980       ProcessMarkStackRef(to_ref);
    981       ++count;
    982     }
    983     gc_mark_stack_->Reset();
    984   } else if (mark_stack_mode == kMarkStackModeShared) {
    985     // Process the shared GC mark stack with a lock.
    986     {
    987       MutexLock mu(self, mark_stack_lock_);
    988       CHECK(revoked_mark_stacks_.empty());
    989     }
    990     while (true) {
    991       std::vector<mirror::Object*> refs;
    992       {
    993         // Copy refs with lock. Note the number of refs should be small.
    994         MutexLock mu(self, mark_stack_lock_);
    995         if (gc_mark_stack_->IsEmpty()) {
    996           break;
    997         }
    998         for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
    999              p != gc_mark_stack_->End(); ++p) {
   1000           refs.push_back(p->AsMirrorPtr());
   1001         }
   1002         gc_mark_stack_->Reset();
   1003       }
   1004       for (mirror::Object* ref : refs) {
   1005         ProcessMarkStackRef(ref);
   1006         ++count;
   1007       }
   1008     }
   1009   } else {
   1010     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
   1011              static_cast<uint32_t>(kMarkStackModeGcExclusive));
   1012     {
   1013       MutexLock mu(self, mark_stack_lock_);
   1014       CHECK(revoked_mark_stacks_.empty());
   1015     }
   1016     // Process the GC mark stack in the exclusive mode. No need to take the lock.
   1017     while (!gc_mark_stack_->IsEmpty()) {
   1018       mirror::Object* to_ref = gc_mark_stack_->PopBack();
   1019       ProcessMarkStackRef(to_ref);
   1020       ++count;
   1021     }
   1022     gc_mark_stack_->Reset();
   1023   }
   1024 
   1025   // Return true if the stack was empty.
   1026   return count == 0;
   1027 }
   1028 
   1029 size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
   1030   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   1031   RevokeThreadLocalMarkStacks(disable_weak_ref_access);
   1032   size_t count = 0;
   1033   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
   1034   {
   1035     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1036     // Make a copy of the mark stack vector.
   1037     mark_stacks = revoked_mark_stacks_;
   1038     revoked_mark_stacks_.clear();
   1039   }
   1040   for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
   1041     for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
   1042       mirror::Object* to_ref = p->AsMirrorPtr();
   1043       ProcessMarkStackRef(to_ref);
   1044       ++count;
   1045     }
   1046     {
   1047       MutexLock mu(Thread::Current(), mark_stack_lock_);
   1048       if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
   1049         // The pool already has enough mark stacks. Delete this one.
   1050         delete mark_stack;
   1051       } else {
   1052         // Otherwise, put it into the pool for later reuse.
   1053         mark_stack->Reset();
   1054         pooled_mark_stacks_.push_back(mark_stack);
   1055       }
   1056     }
   1057   }
   1058   return count;
   1059 }
   1060 
   1061 inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
   1062   DCHECK(!region_space_->IsInFromSpace(to_ref));
   1063   if (kUseBakerReadBarrier) {
   1064     DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
   1065         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
   1066         << " is_marked=" << IsMarked(to_ref);
   1067   }
   1068   // Scan ref fields.
   1069   Scan(to_ref);
   1070   // Mark the gray ref as white or black.
   1071   if (kUseBakerReadBarrier) {
   1072     DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
   1073         << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
   1074         << " is_marked=" << IsMarked(to_ref);
   1075   }
   1076 #ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
   1077   if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
   1078                 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
   1079                 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
   1080     // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
   1081     // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
   1082     DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
   1083   } else {
   1084     // We may occasionally leave a Reference black or white in the queue if its referent happens to
   1085     // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
   1086     // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
   1087     // here in this else block.
   1088     if (kUseBakerReadBarrier) {
   1089       if (region_space_->IsInToSpace(to_ref)) {
   1090         // If to-space, change from gray to white.
   1091         bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
   1092             ReadBarrier::GrayPtr(),
   1093             ReadBarrier::WhitePtr());
   1094         DCHECK(success) << "Must succeed as we won the race.";
   1095         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
   1096       } else {
   1097         // If non-moving space/unevac from space, change from gray
   1098         // to black. We can't change gray to white because it's not
   1099         // safe to use CAS if two threads change values in opposite
   1100         // directions (A->B and B->A). So, we change it to black to
   1101         // indicate non-moving objects that have been marked
   1102         // through. Note we'd need to change from black to white
   1103         // later (concurrently).
   1104         bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
   1105             ReadBarrier::GrayPtr(),
   1106             ReadBarrier::BlackPtr());
   1107         DCHECK(success) << "Must succeed as we won the race.";
   1108         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
   1109       }
   1110     }
   1111   }
   1112 #else
   1113   DCHECK(!kUseBakerReadBarrier);
   1114 #endif
   1115   if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
   1116     AssertToSpaceInvariantObjectVisitor visitor(this);
   1117     visitor(to_ref);
   1118   }
   1119 }
   1120 
   1121 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
   1122   Thread* self = Thread::Current();
   1123   CHECK(thread_running_gc_ != nullptr);
   1124   CHECK_EQ(self, thread_running_gc_);
   1125   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1126   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1127   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
   1128            static_cast<uint32_t>(kMarkStackModeThreadLocal));
   1129   mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
   1130   CHECK(weak_ref_access_enabled_.LoadRelaxed());
   1131   weak_ref_access_enabled_.StoreRelaxed(false);
   1132   QuasiAtomic::ThreadFenceForConstructor();
   1133   // Process the thread local mark stacks one last time after switching to the shared mark stack
   1134   // mode and disabling weak ref access.
   1135   ProcessThreadLocalMarkStacks(true);
   1136   if (kVerboseMode) {
   1137     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   1138   }
   1139 }
   1140 
   1141 void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
   1142   Thread* self = Thread::Current();
   1143   CHECK(thread_running_gc_ != nullptr);
   1144   CHECK_EQ(self, thread_running_gc_);
   1145   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1146   MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1147   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
   1148            static_cast<uint32_t>(kMarkStackModeShared));
   1149   mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
   1150   QuasiAtomic::ThreadFenceForConstructor();
   1151   if (kVerboseMode) {
   1152     LOG(INFO) << "Switched to GC exclusive mark stack mode";
   1153   }
   1154 }
   1155 
   1156 void ConcurrentCopying::CheckEmptyMarkStack() {
   1157   Thread* self = Thread::Current();
   1158   CHECK(thread_running_gc_ != nullptr);
   1159   CHECK_EQ(self, thread_running_gc_);
   1160   CHECK(self->GetThreadLocalMarkStack() == nullptr);
   1161   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   1162   if (mark_stack_mode == kMarkStackModeThreadLocal) {
   1163     // Thread-local mark stack mode.
   1164     RevokeThreadLocalMarkStacks(false);
   1165     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1166     if (!revoked_mark_stacks_.empty()) {
   1167       for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
   1168         while (!mark_stack->IsEmpty()) {
   1169           mirror::Object* obj = mark_stack->PopBack();
   1170           if (kUseBakerReadBarrier) {
   1171             mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
   1172             LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
   1173                       << " is_marked=" << IsMarked(obj);
   1174           } else {
   1175             LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
   1176                       << " is_marked=" << IsMarked(obj);
   1177           }
   1178         }
   1179       }
   1180       LOG(FATAL) << "mark stack is not empty";
   1181     }
   1182   } else {
   1183     // Shared, GC-exclusive, or off.
   1184     MutexLock mu(Thread::Current(), mark_stack_lock_);
   1185     CHECK(gc_mark_stack_->IsEmpty());
   1186     CHECK(revoked_mark_stacks_.empty());
   1187   }
   1188 }
   1189 
   1190 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
   1191   TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
   1192   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1193   Runtime::Current()->SweepSystemWeaks(this);
   1194 }
   1195 
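         // Mark the allocation stack as live, then sweep unmarked objects in the non-moving
         // continuous spaces (skipping the region space and the immune spaces) and in the large
         // object space.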
   1196 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
   1197   {
   1198     TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
   1199     accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   1200     if (kEnableFromSpaceAccountingCheck) {
   1201       CHECK_GE(live_stack_freeze_size_, live_stack->Size());
   1202     }
   1203     heap_->MarkAllocStackAsLive(live_stack);
   1204     live_stack->Reset();
   1205   }
   1206   CheckEmptyMarkStack();
   1207   TimingLogger::ScopedTiming split("Sweep", GetTimings());
   1208   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
   1209     if (space->IsContinuousMemMapAllocSpace()) {
   1210       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
   1211       if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
   1212         continue;
   1213       }
   1214       TimingLogger::ScopedTiming split2(
   1215           alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
   1216       RecordFree(alloc_space->Sweep(swap_bitmaps));
   1217     }
   1218   }
   1219   SweepLargeObjects(swap_bitmaps);
   1220 }
   1221 
   1222 void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
   1223   TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
   1224   RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
   1225 }
   1226 
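         // Visitor that flips a marked object's read barrier pointer from black back to white.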
   1227 class ConcurrentCopying::ClearBlackPtrsVisitor {
   1228  public:
   1229   explicit ClearBlackPtrsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
   1230   void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
   1231       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
   1232     DCHECK(obj != nullptr);
   1233     DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
   1234     DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
   1235     obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
   1236     DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
   1237   }
   1238 
   1239  private:
   1240   ConcurrentCopying* const collector_;
   1241 };
   1242 
   1243 // Clear the black ptrs in non-moving objects back to white.
   1244 void ConcurrentCopying::ClearBlackPtrs() {
   1245   CHECK(kUseBakerReadBarrier);
   1246   TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
   1247   ClearBlackPtrsVisitor visitor(this);
   1248   for (auto& space : heap_->GetContinuousSpaces()) {
   1249     if (space == region_space_) {
   1250       continue;
   1251     }
   1252     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
   1253     if (kVerboseMode) {
   1254       LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
   1255     }
   1256     mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
   1257                                   reinterpret_cast<uintptr_t>(space->Limit()),
   1258                                   visitor);
   1259   }
   1260   space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
   1261   large_object_space->GetMarkBitmap()->VisitMarkedRange(
   1262       reinterpret_cast<uintptr_t>(large_object_space->Begin()),
   1263       reinterpret_cast<uintptr_t>(large_object_space->End()),
   1264       visitor);
    1265   // Also check that objects on the allocation stack are now white.
   1266   if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
   1267     size_t count = GetAllocationStack()->Size();
   1268     auto* it = GetAllocationStack()->Begin();
   1269     auto* end = GetAllocationStack()->End();
   1270     for (size_t i = 0; i < count; ++i, ++it) {
   1271       CHECK_LT(it, end);
   1272       mirror::Object* obj = it->AsMirrorPtr();
   1273       if (obj != nullptr) {
   1274         // Must have been cleared above.
   1275         CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
   1276       }
   1277     }
   1278   }
   1279 }
   1280 
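         // Reclaim phase: record the bytes and objects freed by the evacuation, compute the live
         // ratio of the unevacuated from-space, clear the from-space, and sweep the non-moving
         // spaces and large objects.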
   1281 void ConcurrentCopying::ReclaimPhase() {
   1282   TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
   1283   if (kVerboseMode) {
   1284     LOG(INFO) << "GC ReclaimPhase";
   1285   }
   1286   Thread* self = Thread::Current();
   1287 
   1288   {
   1289     // Double-check that the mark stack is empty.
   1290     // Note: need to set this after VerifyNoFromSpaceRef().
   1291     is_asserting_to_space_invariant_ = false;
   1292     QuasiAtomic::ThreadFenceForConstructor();
   1293     if (kVerboseMode) {
    1294       LOG(INFO) << "Issue an empty checkpoint.";
   1295     }
   1296     IssueEmptyCheckpoint();
   1297     // Disable the check.
   1298     is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
   1299     CheckEmptyMarkStack();
   1300   }
   1301 
   1302   {
   1303     // Record freed objects.
   1304     TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
   1305     // Don't include thread-locals that are in the to-space.
   1306     uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
   1307     uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
   1308     uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
   1309     uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
   1310     uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
   1311     uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
   1312     if (kEnableFromSpaceAccountingCheck) {
   1313       CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
   1314       CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
   1315     }
   1316     CHECK_LE(to_objects, from_objects);
   1317     CHECK_LE(to_bytes, from_bytes);
   1318     int64_t freed_bytes = from_bytes - to_bytes;
   1319     int64_t freed_objects = from_objects - to_objects;
   1320     if (kVerboseMode) {
   1321       LOG(INFO) << "RecordFree:"
   1322                 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
   1323                 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
   1324                 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
   1325                 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
   1326                 << " from_space size=" << region_space_->FromSpaceSize()
   1327                 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
   1328                 << " to_space size=" << region_space_->ToSpaceSize();
   1329       LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
   1330     }
   1331     RecordFree(ObjectBytePair(freed_objects, freed_bytes));
   1332     if (kVerboseMode) {
   1333       LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
   1334     }
   1335   }
   1336 
   1337   {
   1338     TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
   1339     ComputeUnevacFromSpaceLiveRatio();
   1340   }
   1341 
   1342   {
   1343     TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
   1344     region_space_->ClearFromSpace();
   1345   }
   1346 
   1347   {
   1348     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   1349     if (kUseBakerReadBarrier) {
   1350       ClearBlackPtrs();
   1351     }
   1352     Sweep(false);
   1353     SwapBitmaps();
   1354     heap_->UnBindBitmaps();
   1355 
   1356     // Remove bitmaps for the immune spaces.
   1357     while (!cc_bitmaps_.empty()) {
   1358       accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
   1359       cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
   1360       delete cc_bitmap;
   1361       cc_bitmaps_.pop_back();
   1362     }
   1363     region_space_bitmap_ = nullptr;
   1364   }
   1365 
   1366   CheckEmptyMarkStack();
   1367 
   1368   if (kVerboseMode) {
   1369     LOG(INFO) << "GC end of ReclaimPhase";
   1370   }
   1371 }
   1372 
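         // Visitor that clears the black read barrier pointer of a marked unevac from-space
         // object (under the Baker read barrier) and adds its rounded-up allocation size to the
         // live bytes of its region.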
   1373 class ConcurrentCopying::ComputeUnevacFromSpaceLiveRatioVisitor {
   1374  public:
   1375   explicit ComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
   1376       : collector_(cc) {}
   1377   void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
   1378       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
   1379     DCHECK(ref != nullptr);
   1380     DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
   1381     DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
   1382     if (kUseBakerReadBarrier) {
   1383       DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
   1384       // Clear the black ptr.
   1385       ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
   1386       DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
   1387     }
   1388     size_t obj_size = ref->SizeOf();
   1389     size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
   1390     collector_->region_space_->AddLiveBytes(ref, alloc_size);
   1391   }
   1392 
   1393  private:
   1394   ConcurrentCopying* const collector_;
   1395 };
   1396 
    1397 // Compute how many live bytes are left in each unevacuated from-space region.
   1398 void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
   1399   region_space_->AssertAllRegionLiveBytesZeroOrCleared();
   1400   ComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
   1401   region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
   1402                                          reinterpret_cast<uintptr_t>(region_space_->Limit()),
   1403                                          visitor);
   1404 }
   1405 
   1406 // Assert the to-space invariant.
   1407 void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
   1408                                                mirror::Object* ref) {
   1409   CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   1410   if (is_asserting_to_space_invariant_) {
   1411     if (region_space_->IsInToSpace(ref)) {
   1412       // OK.
   1413       return;
   1414     } else if (region_space_->IsInUnevacFromSpace(ref)) {
   1415       CHECK(region_space_bitmap_->Test(ref)) << ref;
   1416     } else if (region_space_->IsInFromSpace(ref)) {
   1417       // Not OK. Do extra logging.
   1418       if (obj != nullptr) {
   1419         LogFromSpaceRefHolder(obj, offset);
   1420       }
   1421       ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
   1422       CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
   1423     } else {
   1424       AssertToSpaceInvariantInNonMovingSpace(obj, ref);
   1425     }
   1426   }
   1427 }
   1428 
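         // Helper visitor that logs the address and referent of each visited root. Used for the
         // extra diagnostics printed when a to-space invariant violation is found in a GC root.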
   1429 class RootPrinter {
   1430  public:
   1431   RootPrinter() { }
   1432 
   1433   template <class MirrorType>
   1434   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
   1435       SHARED_REQUIRES(Locks::mutator_lock_) {
   1436     if (!root->IsNull()) {
   1437       VisitRoot(root);
   1438     }
   1439   }
   1440 
   1441   template <class MirrorType>
   1442   void VisitRoot(mirror::Object** root)
   1443       SHARED_REQUIRES(Locks::mutator_lock_) {
   1444     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
   1445   }
   1446 
   1447   template <class MirrorType>
   1448   void VisitRoot(mirror::CompressedReference<MirrorType>* root)
   1449       SHARED_REQUIRES(Locks::mutator_lock_) {
   1450     LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
   1451   }
   1452 };
   1453 
   1454 void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
   1455                                                mirror::Object* ref) {
   1456   CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   1457   if (is_asserting_to_space_invariant_) {
   1458     if (region_space_->IsInToSpace(ref)) {
   1459       // OK.
   1460       return;
   1461     } else if (region_space_->IsInUnevacFromSpace(ref)) {
   1462       CHECK(region_space_bitmap_->Test(ref)) << ref;
   1463     } else if (region_space_->IsInFromSpace(ref)) {
   1464       // Not OK. Do extra logging.
   1465       if (gc_root_source == nullptr) {
   1466         // No info.
   1467       } else if (gc_root_source->HasArtField()) {
   1468         ArtField* field = gc_root_source->GetArtField();
   1469         LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
   1470         RootPrinter root_printer;
   1471         field->VisitRoots(root_printer);
   1472       } else if (gc_root_source->HasArtMethod()) {
   1473         ArtMethod* method = gc_root_source->GetArtMethod();
   1474         LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
   1475         RootPrinter root_printer;
   1476         method->VisitRoots(root_printer, sizeof(void*));
   1477       }
   1478       ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
   1479       region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
   1480       PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
   1481       MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
   1482       CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
   1483     } else {
   1484       AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
   1485     }
   1486   }
   1487 }
   1488 
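         // Log which space the holder object is in and whether it is marked, to help debug a
         // from-space reference found where a to-space reference was expected.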
   1489 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
   1490   if (kUseBakerReadBarrier) {
   1491     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
   1492               << " holder rb_ptr=" << obj->GetReadBarrierPointer();
   1493   } else {
   1494     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
   1495   }
   1496   if (region_space_->IsInFromSpace(obj)) {
   1497     LOG(INFO) << "holder is in the from-space.";
   1498   } else if (region_space_->IsInToSpace(obj)) {
   1499     LOG(INFO) << "holder is in the to-space.";
   1500   } else if (region_space_->IsInUnevacFromSpace(obj)) {
   1501     LOG(INFO) << "holder is in the unevac from-space.";
   1502     if (region_space_bitmap_->Test(obj)) {
   1503       LOG(INFO) << "holder is marked in the region space bitmap.";
   1504     } else {
   1505       LOG(INFO) << "holder is not marked in the region space bitmap.";
   1506     }
   1507   } else {
   1508     // In a non-moving space.
   1509     if (immune_spaces_.ContainsObject(obj)) {
   1510       LOG(INFO) << "holder is in an immune image or the zygote space.";
   1511       accounting::ContinuousSpaceBitmap* cc_bitmap =
   1512           cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
   1513       CHECK(cc_bitmap != nullptr)
   1514           << "An immune space object must have a bitmap.";
   1515       if (cc_bitmap->Test(obj)) {
   1516         LOG(INFO) << "holder is marked in the bit map.";
   1517       } else {
   1518         LOG(INFO) << "holder is NOT marked in the bit map.";
   1519       }
   1520     } else {
   1521       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
   1522       accounting::ContinuousSpaceBitmap* mark_bitmap =
   1523           heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
   1524       accounting::LargeObjectBitmap* los_bitmap =
   1525           heap_mark_bitmap_->GetLargeObjectBitmap(obj);
   1526       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
   1527       bool is_los = mark_bitmap == nullptr;
   1528       if (!is_los && mark_bitmap->Test(obj)) {
   1529         LOG(INFO) << "holder is marked in the mark bit map.";
   1530       } else if (is_los && los_bitmap->Test(obj)) {
   1531         LOG(INFO) << "holder is marked in the los bit map.";
   1532       } else {
    1533         // If ref is on the allocation stack, then it is considered
    1534         // marked/alive (but not necessarily on the live stack).
   1535         if (IsOnAllocStack(obj)) {
   1536           LOG(INFO) << "holder is on the alloc stack.";
   1537         } else {
   1538           LOG(INFO) << "holder is not marked or on the alloc stack.";
   1539         }
   1540       }
   1541     }
   1542   }
   1543   LOG(INFO) << "offset=" << offset.SizeValue();
   1544 }
   1545 
   1546 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
   1547                                                                mirror::Object* ref) {
    1548   // The ref is in a non-moving space. Check that it is marked.
   1549   if (immune_spaces_.ContainsObject(ref)) {
   1550     accounting::ContinuousSpaceBitmap* cc_bitmap =
   1551         cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
   1552     CHECK(cc_bitmap != nullptr)
   1553         << "An immune space ref must have a bitmap. " << ref;
   1554     if (kUseBakerReadBarrier) {
   1555       CHECK(cc_bitmap->Test(ref))
   1556           << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
   1557           << obj->GetReadBarrierPointer() << " ref=" << ref;
   1558     } else {
   1559       CHECK(cc_bitmap->Test(ref))
   1560           << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
   1561     }
   1562   } else {
   1563     accounting::ContinuousSpaceBitmap* mark_bitmap =
   1564         heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
   1565     accounting::LargeObjectBitmap* los_bitmap =
   1566         heap_mark_bitmap_->GetLargeObjectBitmap(ref);
   1567     CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
   1568     bool is_los = mark_bitmap == nullptr;
   1569     if ((!is_los && mark_bitmap->Test(ref)) ||
   1570         (is_los && los_bitmap->Test(ref))) {
   1571       // OK.
   1572     } else {
    1573       // If ref is on the allocation stack, then it may not be
    1574       // marked live, but it is considered marked/alive (though not
    1575       // necessarily on the live stack).
   1576       CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
   1577                                  << "obj=" << obj << " ref=" << ref;
   1578     }
   1579   }
   1580 }
   1581 
   1582 // Used to scan ref fields of an object.
   1583 class ConcurrentCopying::RefFieldsVisitor {
   1584  public:
   1585   explicit RefFieldsVisitor(ConcurrentCopying* collector)
   1586       : collector_(collector) {}
   1587 
   1588   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
   1589       const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
   1590       SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
   1591     collector_->Process(obj, offset);
   1592   }
   1593 
   1594   void operator()(mirror::Class* klass, mirror::Reference* ref) const
   1595       SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
   1596     CHECK(klass->IsTypeOfReferenceClass());
   1597     collector_->DelayReferenceReferent(klass, ref);
   1598   }
   1599 
   1600   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
   1601       ALWAYS_INLINE
   1602       SHARED_REQUIRES(Locks::mutator_lock_) {
   1603     if (!root->IsNull()) {
   1604       VisitRoot(root);
   1605     }
   1606   }
   1607 
   1608   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
   1609       ALWAYS_INLINE
   1610       SHARED_REQUIRES(Locks::mutator_lock_) {
   1611     collector_->MarkRoot(root);
   1612   }
   1613 
   1614  private:
   1615   ConcurrentCopying* const collector_;
   1616 };
   1617 
   1618 // Scan ref fields of an object.
   1619 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
   1620   DCHECK(!region_space_->IsInFromSpace(to_ref));
   1621   RefFieldsVisitor visitor(this);
    1622   // Disable the read barrier for performance reasons.
   1623   to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
   1624       visitor, visitor);
   1625 }
   1626 
   1627 // Process a field.
   1628 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
   1629   mirror::Object* ref = obj->GetFieldObject<
   1630       mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
   1631   mirror::Object* to_ref = Mark(ref);
   1632   if (to_ref == ref) {
   1633     return;
   1634   }
   1635   // This may fail if the mutator writes to the field at the same time. But it's ok.
   1636   mirror::Object* expected_ref = ref;
   1637   mirror::Object* new_ref = to_ref;
   1638   do {
   1639     if (expected_ref !=
   1640         obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
   1641       // It was updated by the mutator.
   1642       break;
   1643     }
   1644   } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
   1645       false, false, kVerifyNone>(offset, expected_ref, new_ref));
   1646 }
   1647 
   1648 // Process some roots.
   1649 inline void ConcurrentCopying::VisitRoots(
   1650     mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
   1651   for (size_t i = 0; i < count; ++i) {
   1652     mirror::Object** root = roots[i];
   1653     mirror::Object* ref = *root;
   1654     mirror::Object* to_ref = Mark(ref);
   1655     if (to_ref == ref) {
   1656       continue;
   1657     }
   1658     Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
   1659     mirror::Object* expected_ref = ref;
   1660     mirror::Object* new_ref = to_ref;
   1661     do {
   1662       if (expected_ref != addr->LoadRelaxed()) {
   1663         // It was updated by the mutator.
   1664         break;
   1665       }
   1666     } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   1667   }
   1668 }
   1669 
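         // Mark the object referenced by a compressed root. If the object was copied, install the
         // to-space reference into the root slot with a weak CAS; a concurrent update by the
         // mutator aborts the loop, which is OK.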
   1670 inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
   1671   DCHECK(!root->IsNull());
   1672   mirror::Object* const ref = root->AsMirrorPtr();
   1673   mirror::Object* to_ref = Mark(ref);
   1674   if (to_ref != ref) {
   1675     auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
   1676     auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
   1677     auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    1678     // If the CAS fails, the root was updated by the mutator.
   1679     do {
   1680       if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
   1681         // It was updated by the mutator.
   1682         break;
   1683       }
   1684     } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
   1685   }
   1686 }
   1687 
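         // Process compressed-reference roots: mark each non-null root and update it if the
         // referent moved.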
   1688 inline void ConcurrentCopying::VisitRoots(
   1689     mirror::CompressedReference<mirror::Object>** roots, size_t count,
   1690     const RootInfo& info ATTRIBUTE_UNUSED) {
   1691   for (size_t i = 0; i < count; ++i) {
   1692     mirror::CompressedReference<mirror::Object>* const root = roots[i];
   1693     if (!root->IsNull()) {
   1694       MarkRoot(root);
   1695     }
   1696   }
   1697 }
   1698 
    1699 // Fill the given memory block with a dummy object. Used to fill in copies of objects that
    1700 // were lost in the forwarding race.
   1701 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
   1702   CHECK_ALIGNED(byte_size, kObjectAlignment);
   1703   memset(dummy_obj, 0, byte_size);
   1704   mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
   1705   CHECK(int_array_class != nullptr);
   1706   AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
   1707   size_t component_size = int_array_class->GetComponentSize();
   1708   CHECK_EQ(component_size, sizeof(int32_t));
   1709   size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
   1710   if (data_offset > byte_size) {
    1711     // The block is too small to hold even a zero-length int array. Use java.lang.Object.
   1712     mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
   1713     AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
   1714     CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
   1715     dummy_obj->SetClass(java_lang_Object);
   1716     CHECK_EQ(byte_size, dummy_obj->SizeOf());
   1717   } else {
   1718     // Use an int array.
   1719     dummy_obj->SetClass(int_array_class);
   1720     CHECK(dummy_obj->IsArrayInstance());
   1721     int32_t length = (byte_size - data_offset) / component_size;
   1722     dummy_obj->AsArray()->SetLength(length);
   1723     CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
   1724         << "byte_size=" << byte_size << " length=" << length
   1725         << " component_size=" << component_size << " data_offset=" << data_offset;
   1726     CHECK_EQ(byte_size, dummy_obj->SizeOf())
   1727         << "byte_size=" << byte_size << " length=" << length
   1728         << " component_size=" << component_size << " data_offset=" << data_offset;
   1729   }
   1730 }
   1731 
    1732 // Reuse memory blocks that held copies of objects lost in the forwarding race.
   1733 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
   1734   // Try to reuse the blocks that were unused due to CAS failures.
   1735   CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
   1736   Thread* self = Thread::Current();
   1737   size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
   1738   MutexLock mu(self, skipped_blocks_lock_);
   1739   auto it = skipped_blocks_map_.lower_bound(alloc_size);
   1740   if (it == skipped_blocks_map_.end()) {
   1741     // Not found.
   1742     return nullptr;
   1743   }
   1744   {
   1745     size_t byte_size = it->first;
   1746     CHECK_GE(byte_size, alloc_size);
   1747     if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
   1748       // If remainder would be too small for a dummy object, retry with a larger request size.
   1749       it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
   1750       if (it == skipped_blocks_map_.end()) {
   1751         // Not found.
   1752         return nullptr;
   1753       }
   1754       CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
   1755       CHECK_GE(it->first - alloc_size, min_object_size)
   1756           << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
   1757     }
   1758   }
   1759   // Found a block.
   1760   CHECK(it != skipped_blocks_map_.end());
   1761   size_t byte_size = it->first;
   1762   uint8_t* addr = it->second;
   1763   CHECK_GE(byte_size, alloc_size);
   1764   CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
   1765   CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
   1766   if (kVerboseMode) {
   1767     LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
   1768   }
   1769   skipped_blocks_map_.erase(it);
   1770   memset(addr, 0, byte_size);
   1771   if (byte_size > alloc_size) {
   1772     // Return the remainder to the map.
   1773     CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
   1774     CHECK_GE(byte_size - alloc_size, min_object_size);
   1775     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
   1776                         byte_size - alloc_size);
   1777     CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
   1778     skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
   1779   }
   1780   return reinterpret_cast<mirror::Object*>(addr);
   1781 }
   1782 
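         // Copy from_ref into the to-space (falling back to a skipped block or the non-moving
         // space if the region space allocation fails) and race to install a forwarding pointer
         // in from_ref's lock word. If another thread wins the race, the local copy is turned
         // into a dummy object and the winner's copy is returned instead.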
   1783 mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
   1784   DCHECK(region_space_->IsInFromSpace(from_ref));
    1785   // No read barrier to avoid nested RB that might violate the to-space
    1786   // invariant. Note that from_ref is a from-space ref so the SizeOf()
    1787   // call will access the from-space meta objects, but that is OK and necessary.
   1788   size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
   1789   size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
   1790   size_t region_space_bytes_allocated = 0U;
   1791   size_t non_moving_space_bytes_allocated = 0U;
   1792   size_t bytes_allocated = 0U;
   1793   size_t dummy;
   1794   mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
   1795       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
   1796   bytes_allocated = region_space_bytes_allocated;
   1797   if (to_ref != nullptr) {
   1798     DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
   1799   }
   1800   bool fall_back_to_non_moving = false;
   1801   if (UNLIKELY(to_ref == nullptr)) {
   1802     // Failed to allocate in the region space. Try the skipped blocks.
   1803     to_ref = AllocateInSkippedBlock(region_space_alloc_size);
   1804     if (to_ref != nullptr) {
    1805       // Succeeded in allocating from a skipped block.
   1806       if (heap_->use_tlab_) {
    1807         // This is necessary in the TLAB case, as the allocation is not otherwise accounted for in the space.
   1808         region_space_->RecordAlloc(to_ref);
   1809       }
   1810       bytes_allocated = region_space_alloc_size;
   1811     } else {
   1812       // Fall back to the non-moving space.
   1813       fall_back_to_non_moving = true;
   1814       if (kVerboseMode) {
   1815         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
   1816                   << to_space_bytes_skipped_.LoadSequentiallyConsistent()
   1817                   << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
   1818       }
   1820       to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
   1821                                                &non_moving_space_bytes_allocated, nullptr, &dummy);
   1822       CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
   1823       bytes_allocated = non_moving_space_bytes_allocated;
   1824       // Mark it in the mark bitmap.
   1825       accounting::ContinuousSpaceBitmap* mark_bitmap =
   1826           heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
   1827       CHECK(mark_bitmap != nullptr);
   1828       CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
   1829     }
   1830   }
   1831   DCHECK(to_ref != nullptr);
   1832 
    1833   // Attempt to install the forwarding pointer. This is in a loop as the
    1834   // lock word atomic write can fail.
   1835   while (true) {
   1836     // Copy the object. TODO: copy only the lockword in the second iteration and on?
   1837     memcpy(to_ref, from_ref, obj_size);
   1838 
   1839     LockWord old_lock_word = to_ref->GetLockWord(false);
   1840 
   1841     if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
   1842       // Lost the race. Another thread (either GC or mutator) stored
   1843       // the forwarding pointer first. Make the lost copy (to_ref)
   1844       // look like a valid but dead (dummy) object and keep it for
   1845       // future reuse.
   1846       FillWithDummyObject(to_ref, bytes_allocated);
   1847       if (!fall_back_to_non_moving) {
   1848         DCHECK(region_space_->IsInToSpace(to_ref));
   1849         if (bytes_allocated > space::RegionSpace::kRegionSize) {
   1850           // Free the large alloc.
   1851           region_space_->FreeLarge(to_ref, bytes_allocated);
   1852         } else {
   1853           // Record the lost copy for later reuse.
   1854           heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
   1855           to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
   1856           to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
   1857           MutexLock mu(Thread::Current(), skipped_blocks_lock_);
   1858           skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
   1859                                                     reinterpret_cast<uint8_t*>(to_ref)));
   1860         }
   1861       } else {
   1862         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
   1863         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
   1864         // Free the non-moving-space chunk.
   1865         accounting::ContinuousSpaceBitmap* mark_bitmap =
   1866             heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
   1867         CHECK(mark_bitmap != nullptr);
   1868         CHECK(mark_bitmap->Clear(to_ref));
   1869         heap_->non_moving_space_->Free(Thread::Current(), to_ref);
   1870       }
   1871 
   1872       // Get the winner's forward ptr.
   1873       mirror::Object* lost_fwd_ptr = to_ref;
   1874       to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
   1875       CHECK(to_ref != nullptr);
   1876       CHECK_NE(to_ref, lost_fwd_ptr);
   1877       CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
   1878       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
   1879       return to_ref;
   1880     }
   1881 
   1882     // Set the gray ptr.
   1883     if (kUseBakerReadBarrier) {
   1884       to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
   1885     }
   1886 
   1887     LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
   1888 
   1889     // Try to atomically write the fwd ptr.
   1890     bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
   1891     if (LIKELY(success)) {
   1892       // The CAS succeeded.
   1893       objects_moved_.FetchAndAddSequentiallyConsistent(1);
   1894       bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
   1895       if (LIKELY(!fall_back_to_non_moving)) {
   1896         DCHECK(region_space_->IsInToSpace(to_ref));
   1897       } else {
   1898         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
   1899         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
   1900       }
   1901       if (kUseBakerReadBarrier) {
   1902         DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
   1903       }
   1904       DCHECK(GetFwdPtr(from_ref) == to_ref);
   1905       CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
   1906       PushOntoMarkStack(to_ref);
   1907       return to_ref;
   1908     } else {
   1909       // The CAS failed. It may have lost the race or may have failed
   1910       // due to monitor/hashcode ops. Either way, retry.
   1911     }
   1912   }
   1913 }
   1914 
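         // Return the marked (to-space) counterpart of from_ref if it is already marked, or
         // nullptr if it is not. This is a query only; it does not mark or copy anything.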
   1915 mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
   1916   DCHECK(from_ref != nullptr);
   1917   space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
   1918   if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
   1919     // It's already marked.
   1920     return from_ref;
   1921   }
   1922   mirror::Object* to_ref;
   1923   if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
   1924     to_ref = GetFwdPtr(from_ref);
   1925     DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
   1926            heap_->non_moving_space_->HasAddress(to_ref))
   1927         << "from_ref=" << from_ref << " to_ref=" << to_ref;
   1928   } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
   1929     if (region_space_bitmap_->Test(from_ref)) {
   1930       to_ref = from_ref;
   1931     } else {
   1932       to_ref = nullptr;
   1933     }
   1934   } else {
   1935     // from_ref is in a non-moving space.
   1936     if (immune_spaces_.ContainsObject(from_ref)) {
   1937       accounting::ContinuousSpaceBitmap* cc_bitmap =
   1938           cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
   1939       DCHECK(cc_bitmap != nullptr)
   1940           << "An immune space object must have a bitmap";
   1941       if (kIsDebugBuild) {
   1942         DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
   1943             << "Immune space object must be already marked";
   1944       }
   1945       if (cc_bitmap->Test(from_ref)) {
   1946         // Already marked.
   1947         to_ref = from_ref;
   1948       } else {
    1949         // Not marked.
   1950         to_ref = nullptr;
   1951       }
   1952     } else {
   1953       // Non-immune non-moving space. Use the mark bitmap.
   1954       accounting::ContinuousSpaceBitmap* mark_bitmap =
   1955           heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
   1956       accounting::LargeObjectBitmap* los_bitmap =
   1957           heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
   1958       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
   1959       bool is_los = mark_bitmap == nullptr;
   1960       if (!is_los && mark_bitmap->Test(from_ref)) {
   1961         // Already marked.
   1962         to_ref = from_ref;
   1963       } else if (is_los && los_bitmap->Test(from_ref)) {
   1964         // Already marked in LOS.
   1965         to_ref = from_ref;
   1966       } else {
   1967         // Not marked.
   1968         if (IsOnAllocStack(from_ref)) {
   1969           // If on the allocation stack, it's considered marked.
   1970           to_ref = from_ref;
   1971         } else {
   1972           // Not marked.
   1973           to_ref = nullptr;
   1974         }
   1975       }
   1976     }
   1977   }
   1978   return to_ref;
   1979 }
   1980 
   1981 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
   1982   QuasiAtomic::ThreadFenceAcquire();
   1983   accounting::ObjectStack* alloc_stack = GetAllocationStack();
   1984   return alloc_stack->Contains(ref);
   1985 }
   1986 
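         // Mark an object in a non-moving space. Newly marked objects are grayed under the Baker
         // read barrier and pushed onto the mark stack; objects on the allocation stack are
         // treated as already marked and left white.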
   1987 mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
   1988   // ref is in a non-moving space (from_ref == to_ref).
   1989   DCHECK(!region_space_->HasAddress(ref)) << ref;
   1990   if (immune_spaces_.ContainsObject(ref)) {
   1991     accounting::ContinuousSpaceBitmap* cc_bitmap =
   1992         cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
   1993     DCHECK(cc_bitmap != nullptr)
   1994         << "An immune space object must have a bitmap";
   1995     if (kIsDebugBuild) {
   1996       DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
   1997           << "Immune space object must be already marked";
   1998     }
   1999     // This may or may not succeed, which is ok.
   2000     if (kUseBakerReadBarrier) {
   2001       ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
   2002     }
   2003     if (cc_bitmap->AtomicTestAndSet(ref)) {
   2004       // Already marked.
   2005     } else {
   2006       // Newly marked.
   2007       if (kUseBakerReadBarrier) {
   2008         DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
   2009       }
   2010       PushOntoMarkStack(ref);
   2011     }
   2012   } else {
   2013     // Use the mark bitmap.
   2014     accounting::ContinuousSpaceBitmap* mark_bitmap =
   2015         heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
   2016     accounting::LargeObjectBitmap* los_bitmap =
   2017         heap_mark_bitmap_->GetLargeObjectBitmap(ref);
   2018     CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
   2019     bool is_los = mark_bitmap == nullptr;
   2020     if (!is_los && mark_bitmap->Test(ref)) {
   2021       // Already marked.
   2022       if (kUseBakerReadBarrier) {
   2023         DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
   2024                ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
   2025       }
   2026     } else if (is_los && los_bitmap->Test(ref)) {
   2027       // Already marked in LOS.
   2028       if (kUseBakerReadBarrier) {
   2029         DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
   2030                ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
   2031       }
   2032     } else {
   2033       // Not marked.
   2034       if (IsOnAllocStack(ref)) {
   2035         // If it's on the allocation stack, it's considered marked. Keep it white.
   2036         // Objects on the allocation stack need not be marked.
   2037         if (!is_los) {
   2038           DCHECK(!mark_bitmap->Test(ref));
   2039         } else {
   2040           DCHECK(!los_bitmap->Test(ref));
   2041         }
   2042         if (kUseBakerReadBarrier) {
   2043           DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
   2044         }
   2045       } else {
   2046         // Not marked or on the allocation stack. Try to mark it.
   2047         // This may or may not succeed, which is ok.
   2048         if (kUseBakerReadBarrier) {
   2049           ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
   2050         }
   2051         if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
   2052           // Already marked.
   2053         } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
   2054           // Already marked in LOS.
   2055         } else {
   2056           // Newly marked.
   2057           if (kUseBakerReadBarrier) {
   2058             DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
   2059           }
   2060           PushOntoMarkStack(ref);
   2061         }
   2062       }
   2063     }
   2064   }
   2065   return ref;
   2066 }
   2067 
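         // Final cleanup after a GC cycle: verify that all pooled mark stacks have been returned,
         // drop the cached skipped blocks, and clear the marked objects for the next collection.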
   2068 void ConcurrentCopying::FinishPhase() {
   2069   Thread* const self = Thread::Current();
   2070   {
   2071     MutexLock mu(self, mark_stack_lock_);
   2072     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   2073   }
   2074   region_space_ = nullptr;
   2075   {
   2076     MutexLock mu(Thread::Current(), skipped_blocks_lock_);
   2077     skipped_blocks_map_.clear();
   2078   }
   2079   ReaderMutexLock mu(self, *Locks::mutator_lock_);
   2080   WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
   2081   heap_->ClearMarkedObjects();
   2082 }
   2083 
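         // Return whether the object referenced by the field is marked. If the object has been
         // moved, update the field in place to point to the to-space copy.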
   2084 bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
   2085   mirror::Object* from_ref = field->AsMirrorPtr();
   2086   mirror::Object* to_ref = IsMarked(from_ref);
   2087   if (to_ref == nullptr) {
   2088     return false;
   2089   }
   2090   if (from_ref != to_ref) {
   2091     QuasiAtomic::ThreadFenceRelease();
   2092     field->Assign(to_ref);
   2093     QuasiAtomic::ThreadFenceSequentiallyConsistent();
   2094   }
   2095   return true;
   2096 }
   2097 
   2098 mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
   2099   return Mark(from_ref);
   2100 }
   2101 
   2102 void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
   2103   heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
   2104 }
   2105 
   2106 void ConcurrentCopying::ProcessReferences(Thread* self) {
   2107   TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
    2108   // We don't really need to hold the heap bitmap lock as we use CAS to mark in bitmaps.
   2109   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   2110   GetHeap()->GetReferenceProcessor()->ProcessReferences(
   2111       true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
   2112 }
   2113 
   2114 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
   2115   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   2116   region_space_->RevokeAllThreadLocalBuffers();
   2117 }
   2118 
   2119 }  // namespace collector
   2120 }  // namespace gc
   2121 }  // namespace art
   2122