/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack has at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.
// Without this threshold, ProcessReferences can add overhead since it may end up doing many
// calls to ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}
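
// Spaces with kGcRetentionPolicyNeverCollect (typically the image space; partial and
// sticky collectors presumably immunize more) end up in immune_region_: the collector
// never sweeps them, treats their objects as live, and finds references leading out of
// them via mod-union tables (see UpdateAndMarkModUnion below) instead of tracing them.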

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required when doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark as
    // live in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(GetTimings(), false);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps
    // unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it is good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}
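
// Resize() presumably reallocates the mark stack's backing storage, invalidating
// Begin()/End(), which is why the contents are copied into a temporary vector first and
// re-pushed afterwards.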

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in current_space_bitmap_, but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}
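
// Marking is thus tiered: immune objects need only a debug-build liveness check, objects
// covered by current_space_bitmap_ take a single bitmap Set() (the expected common case),
// and everything else goes through mark_bitmap_ with the slow-path visitor above, which
// aborts on pointers that belong to no known space.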

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // The lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing that, find the
  // space the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
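
// AtomicTestAndSet() returns the previous value of the mark bit, so a false result means
// this thread won the race to mark the object and the caller (MarkObjectNonNullParallel)
// must push it on the mark stack; a true result means some thread already marked it.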

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
            : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread-local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow: give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }
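
  // Worked example of the overflow path, using the kMaxSize of 1 KB entries above: when a
  // push finds mark_stack_pos_ == 1024, pos drops to 512, the entries at indices
  // [512, 1024) are copied into a fresh MarkStackTask (see the constructor) and queued on
  // the thread pool, and the incoming object is then stored at index 512.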

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects on the task's mark stack.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};
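
// The prefetch scheme in MarkStackTask::Run() keeps a small FIFO (4 entries) of objects
// whose cache lines have been requested via __builtin_prefetch(); by the time an object
// reaches the front of the FIFO and is scanned, its header is likely already in cache,
// hiding part of the memory latency of pointer chasing. kUseMarkStackPrefetch toggles it.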

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version is faster for card scanning even with only one thread. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
        GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
          break;
        case space::kGcRetentionPolicyFullCollect:
          name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
          break;
        default:
          LOG(FATAL) << "Unreachable";
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
      }
    }
  }
}
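
// Card table background: each card maps to a fixed-size block of the heap
// (accounting::CardTable::kCardSize bytes), and the write barrier dirties a card
// (kCardDirty) whenever a reference field in its block is written. ProcessCards "ages"
// dirty cards, which is why PreCleanCards can scan with minimum_age == kCardDirty - 1
// while the pause only re-scans cards dirtied after the pre-cleaning.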

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects in the given bitmap range.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
    CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
        allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks using a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};


void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}
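
// The handshake above: RunCheckpoint() returns how many threads will execute the closure,
// and each CheckpointMarkThreadRoots::Run() call ends with a Pass() on the barrier.
// Increment(self, barrier_count) then blocks until every pass has happened. The heap
// bitmap and mutator locks are dropped first so that checkpointed threads can make
// progress, and reacquired once the barrier is satisfied.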

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_los.objects;
      freed_los.bytes += large_object_space->Free(self, obj);
    }
  }
  {
    TimingLogger::ScopedTiming t("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}
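
// Worked example of the chunked free above, with kSweepArrayChunkFreeSize == 1024:
// unmarked objects accumulate in chunk_free_buffer (the page-aligned scratch MemMap
// allocated in the constructor) and are released through a single FreeList() call once
// 1024 have been gathered, amortizing per-free allocator overhead; each space's partial
// final chunk is flushed before moving on to the next space.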

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
                                                         this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}
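
// Example split, assuming a 10000-entry mark stack and thread_count == 4: chunk_size =
// min(10000 / 4 + 1, kMaxSize) = min(2501, 1024) = 1024, so nine 1024-entry tasks plus one
// final 784-entry task are queued before the workers are started.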

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

inline bool MarkSweep::IsMarked(const Object* object) const {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
        << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
        << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
        << " immune=" <<  mark_immune_count_.LoadRelaxed()
        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art