      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "semi_space-inl.h"
     18 
     19 #include <functional>
     20 #include <numeric>
     21 #include <climits>
     22 #include <vector>
     23 
     24 #include "base/logging.h"
     25 #include "base/macros.h"
     26 #include "base/mutex-inl.h"
     27 #include "base/timing_logger.h"
     28 #include "gc/accounting/heap_bitmap-inl.h"
     29 #include "gc/accounting/mod_union_table.h"
     30 #include "gc/accounting/remembered_set.h"
     31 #include "gc/accounting/space_bitmap-inl.h"
     32 #include "gc/heap.h"
     33 #include "gc/reference_processor.h"
     34 #include "gc/space/bump_pointer_space.h"
     35 #include "gc/space/bump_pointer_space-inl.h"
     36 #include "gc/space/image_space.h"
     37 #include "gc/space/large_object_space.h"
     38 #include "gc/space/space-inl.h"
     39 #include "indirect_reference_table.h"
     40 #include "intern_table.h"
     41 #include "jni_internal.h"
     42 #include "mark_sweep-inl.h"
     43 #include "monitor.h"
     44 #include "mirror/reference-inl.h"
     45 #include "mirror/object-inl.h"
     46 #include "runtime.h"
     47 #include "thread-inl.h"
     48 #include "thread_list.h"
     49 
     50 using ::art::mirror::Object;
     51 
     52 namespace art {
     53 namespace gc {
     54 namespace collector {
     55 
     56 static constexpr bool kProtectFromSpace = true;
     57 static constexpr bool kStoreStackTraces = false;
     58 static constexpr size_t kBytesPromotedThreshold = 4 * MB;
     59 static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
     60 
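        // Set up the immune region and bind the live bitmaps of spaces we do not copy out of to
        // their mark bitmaps, so that their objects are treated as already marked during this GC.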
     61 void SemiSpace::BindBitmaps() {
     62   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
     63   WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
     64   // Mark all of the spaces we never collect as immune.
     65   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     66     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
     67         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
     68       CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
     69     } else if (space->GetLiveBitmap() != nullptr) {
     70       if (space == to_space_ || collect_from_space_only_) {
     71         if (collect_from_space_only_) {
     72           // Bind the bitmaps of the main free list space and the non-moving space when we
     73           // are doing a bump pointer space only collection.
     74           CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
     75                 space == GetHeap()->GetNonMovingSpace());
     76         }
     77         CHECK(space->IsContinuousMemMapAllocSpace());
     78         space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
     79       }
     80     }
     81   }
     82   if (collect_from_space_only_) {
     83     // We won't collect the large object space if this is a bump pointer space only collection.
     84     is_large_object_space_immune_ = true;
     85   }
     86 }
     87 
     88 SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
     89     : GarbageCollector(heap,
     90                        name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
     91       to_space_(nullptr),
     92       from_space_(nullptr),
     93       generational_(generational),
     94       last_gc_to_space_end_(nullptr),
     95       bytes_promoted_(0),
     96       bytes_promoted_since_last_whole_heap_collection_(0),
     97       large_object_bytes_allocated_at_last_whole_heap_collection_(0),
     98       collect_from_space_only_(generational),
     99       collector_name_(name_),
    100       swap_semi_spaces_(true) {
    101 }
    102 
    103 void SemiSpace::RunPhases() {
    104   Thread* self = Thread::Current();
    105   InitializePhase();
    106   // The semi-space collector is special since it is sometimes called with the mutators
    107   // suspended during zygote creation and collector transitions. If we already hold the
    108   // mutator lock exclusively, we can't lock it again since that would cause a deadlock.
    109   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    110     GetHeap()->PreGcVerificationPaused(this);
    111     GetHeap()->PrePauseRosAllocVerification(this);
    112     MarkingPhase();
    113     ReclaimPhase();
    114     GetHeap()->PostGcVerificationPaused(this);
    115   } else {
    116     Locks::mutator_lock_->AssertNotHeld(self);
    117     {
    118       ScopedPause pause(this);
    119       GetHeap()->PreGcVerificationPaused(this);
    120       GetHeap()->PrePauseRosAllocVerification(this);
    121       MarkingPhase();
    122     }
    123     {
    124       ReaderMutexLock mu(self, *Locks::mutator_lock_);
    125       ReclaimPhase();
    126     }
    127     GetHeap()->PostGcVerification(this);
    128   }
    129   FinishPhase();
    130 }
    131 
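        // Per-collection setup: reset counters and the immune region, cache the mark stack and
        // bitmaps, and pick the promotion and fallback spaces used while copying.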
    132 void SemiSpace::InitializePhase() {
    133   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    134   mark_stack_ = heap_->GetMarkStack();
    135   DCHECK(mark_stack_ != nullptr);
    136   immune_region_.Reset();
    137   is_large_object_space_immune_ = false;
    138   saved_bytes_ = 0;
    139   bytes_moved_ = 0;
    140   objects_moved_ = 0;
    141   self_ = Thread::Current();
    142   CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
    143   // Set the initial bitmap.
    144   to_space_live_bitmap_ = to_space_->GetLiveBitmap();
    145   {
    146     // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    147     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    148     mark_bitmap_ = heap_->GetMarkBitmap();
    149   }
    150   if (generational_) {
    151     promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
    152   }
    153   fallback_space_ = GetHeap()->GetNonMovingSpace();
    154 }
    155 
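        // Process the java.lang.ref references (e.g. soft and weak references) discovered during
        // marking, using this collector's marking callbacks.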
    156 void SemiSpace::ProcessReferences(Thread* self) {
    157   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    158   GetHeap()->GetReferenceProcessor()->ProcessReferences(
    159       false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
    160       &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
    161 }
    162 
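        // The paused phase: decides between a whole heap and a bump pointer space only collection,
        // marks everything reachable, copies the live objects out of the from-space, and then
        // clears and protects the from-space.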
    163 void SemiSpace::MarkingPhase() {
    164   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    165   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
    166   if (kStoreStackTraces) {
    167     Locks::mutator_lock_->AssertExclusiveHeld(self_);
    168     // Store the stack traces into the runtime fault string in case we get a heap corruption
    169     // related crash later.
    170     ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    171     std::ostringstream oss;
    172     Runtime* runtime = Runtime::Current();
    173     runtime->GetThreadList()->DumpForSigQuit(oss);
    174     runtime->GetThreadList()->DumpNativeStacks(oss);
    175     runtime->SetFaultMessage(oss.str());
    176     CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
    177   }
    178   // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
    179   // to prevent fragmentation.
    180   RevokeAllThreadLocalBuffers();
    181   if (generational_) {
    182     if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
    183         GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
    184         GetCurrentIteration()->GetClearSoftReferences()) {
    185       // If this is an explicit, native-allocation-triggered, or last-attempt
    186       // collection, collect the whole heap.
    187       collect_from_space_only_ = false;
    188     }
    189     if (!collect_from_space_only_) {
    190       VLOG(heap) << "Whole heap collection";
    191       name_ = collector_name_ + " whole";
    192     } else {
    193       VLOG(heap) << "Bump pointer space only collection";
    194       name_ = collector_name_ + " bps";
    195     }
    196   }
    197 
    198   if (!collect_from_space_only_) {
    199     // If non-generational, always clear soft references.
    200     // If generational, clear soft references if this is a whole heap collection.
    201     GetCurrentIteration()->SetClearSoftReferences(true);
    202   }
    203   Locks::mutator_lock_->AssertExclusiveHeld(self_);
    204   if (generational_) {
    205     // If last_gc_to_space_end_ is out of the bounds of the from-space
    206     // (the to-space from last GC), then point it to the beginning of
    207     // the from-space. For example, the very first GC or the
    208     // pre-zygote compaction.
    209     if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
    210       last_gc_to_space_end_ = from_space_->Begin();
    211     }
    212     // Reset this before the marking starts below.
    213     bytes_promoted_ = 0;
    214   }
    215   // Assume the cleared space is already empty.
    216   BindBitmaps();
    217   // Process dirty cards and add dirty cards to mod-union tables.
    218   heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
    219   // Clear the whole card table since we cannot get any additional dirty cards during the
    220   // paused GC. This saves memory but only works for pause-the-world collectors.
    221   t.NewTiming("ClearCardTable");
    222   heap_->GetCardTable()->ClearCardTable();
    223   // Need to do this before the checkpoint since we don't want any threads to add references to
    224   // the live stack during the recursive mark.
    225   if (kUseThreadLocalAllocationStack) {
    226     TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
    227     heap_->RevokeAllThreadLocalAllocationStacks(self_);
    228   }
    229   heap_->SwapStacks(self_);
    230   {
    231     WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    232     MarkRoots();
    233     // Recursively mark remaining objects.
    234     MarkReachableObjects();
    235   }
    236   ProcessReferences(self_);
    237   {
    238     ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    239     SweepSystemWeaks();
    240   }
    241   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
    242   // before they are properly counted.
    243   RevokeAllThreadLocalBuffers();
    244   // Record freed memory.
    245   const int64_t from_bytes = from_space_->GetBytesAllocated();
    246   const int64_t to_bytes = bytes_moved_;
    247   const uint64_t from_objects = from_space_->GetObjectsAllocated();
    248   const uint64_t to_objects = objects_moved_;
    249   CHECK_LE(to_objects, from_objects);
    250   // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
    251   // space.
    252   RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
    253   // Clear and protect the from space.
    254   from_space_->Clear();
    255   VLOG(heap) << "Protecting from_space_: " << *from_space_;
    256   from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
    257   heap_->PreSweepingGcVerification(this);
    258   if (swap_semi_spaces_) {
    259     heap_->SwapSemiSpaces();
    260   }
    261 }
    262 
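        // Visitor that forwards each visited object to SemiSpace::ScanObject(); used when walking
        // live bitmaps of spaces that are scanned as roots.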
    263 class SemiSpaceScanObjectVisitor {
    264  public:
    265   explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
    266   void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
    267                                                               Locks::heap_bitmap_lock_) {
    268     DCHECK(obj != nullptr);
    269     semi_space_->ScanObject(obj);
    270   }
    271  private:
    272   SemiSpace* const semi_space_;
    273 };
    274 
    275 // Used to verify that there are no references to the from-space.
    276 class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
    277  public:
    278   explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
    279       from_space_(from_space) {}
    280 
    281   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
    282       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    283     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    284     if (from_space_->HasAddress(ref)) {
    285       Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
    286       LOG(FATAL) << ref << " found in from space";
    287     }
    288   }
    289  private:
    290   space::ContinuousMemMapAllocSpace* from_space_;
    291 };
    292 
    293 void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
    294   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
    295   SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
    296   obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
    297 }
    298 
    299 class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
    300  public:
    301   explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
    302   void operator()(Object* obj) const
    303       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    304     DCHECK(obj != nullptr);
    305     semi_space_->VerifyNoFromSpaceReferences(obj);
    306   }
    307  private:
    308   SemiSpace* const semi_space_;
    309 };
    310 
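        // Completes marking after MarkRoots(): marks the objects on the live stack, scans the
        // mod-union tables, remembered sets, or live bitmaps of the uncollected spaces, scans the
        // immune large object space when applicable, and then recursively processes the mark stack.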
    311 void SemiSpace::MarkReachableObjects() {
    312   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    313   {
    314     TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    315     accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    316     heap_->MarkAllocStackAsLive(live_stack);
    317     live_stack->Reset();
    318   }
    319   for (auto& space : heap_->GetContinuousSpaces()) {
    320     // If the space is immune then we need to mark the references to other spaces.
    321     accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    322     if (table != nullptr) {
    323       // TODO: Improve naming.
    324       TimingLogger::ScopedTiming t2(
    325           space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
    326                                    "UpdateAndMarkImageModUnionTable",
    327                                    GetTimings());
    328       table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    329       DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    330     } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
    331       // If the space has no mod union table (the non-moving space and main spaces when the bump
    332       // pointer space only collection is enabled), then we need to scan its live bitmap or dirty
    333       // cards as roots (including the objects on the live stack, which have just been marked in
    334       // the live bitmap above in MarkAllocStackAsLive()).
    335       DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
    336           << "Space " << space->GetName() << " "
    337           << "generational_=" << generational_ << " "
    338           << "collect_from_space_only_=" << collect_from_space_only_;
    339       accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
    340       CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
    341       if (rem_set != nullptr) {
    342         TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
    343         rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
    344                                          from_space_, this);
    345         if (kIsDebugBuild) {
    346           // Verify that there are no from-space references that
    347           // remain in the space, that is, the remembered set (and the
    348           // card table) didn't miss any from-space references in the
    349           // space.
    350           accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    351           SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
    352           live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
    353                                         reinterpret_cast<uintptr_t>(space->End()),
    354                                         visitor);
    355         }
    356       } else {
    357         TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
    358         accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    359         SemiSpaceScanObjectVisitor visitor(this);
    360         live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
    361                                       reinterpret_cast<uintptr_t>(space->End()),
    362                                       visitor);
    363       }
    364     }
    365   }
    366 
    367   CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
    368   if (is_large_object_space_immune_) {
    369     TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
    370     DCHECK(collect_from_space_only_);
    371     // Delay copying the live set to the marked set until here from
    372     // BindBitmaps() as the large objects on the allocation stack may
    373     // be newly added to the live set above in MarkAllocStackAsLive().
    374     GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
    375 
    376     // When the large object space is immune, we still need to scan the
    377     // large objects as roots since they contain references to their
    378     // classes (primitive array classes) that could move, even though they
    379     // don't contain any other references.
    380     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    381     accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    382     SemiSpaceScanObjectVisitor visitor(this);
    383     large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
    384                                         reinterpret_cast<uintptr_t>(large_object_space->End()),
    385                                         visitor);
    386   }
    387   // Recursively process the mark stack.
    388   ProcessMarkStack();
    389 }
    390 
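        // Sweeps up the unmarked objects, swaps the live and mark bitmaps, and unbinds the bitmaps
        // bound in BindBitmaps(); in generational mode, records the end of the to-space as the
        // old/young boundary for the next GC.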
    391 void SemiSpace::ReclaimPhase() {
    392   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    393   WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    394   // Reclaim unmarked objects.
    395   Sweep(false);
    396   // Swap the live and mark bitmaps for each space that we modified. This is an
    397   // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    398   // bitmaps.
    399   SwapBitmaps();
    400   // Unbind the live and mark bitmaps.
    401   GetHeap()->UnBindBitmaps();
    402   if (saved_bytes_ > 0) {
    403     VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
    404   }
    405   if (generational_) {
    406     // Record the end (top) of the to space so we can distinguish
    407     // between objects that were allocated since the last GC and the
    408     // older objects.
    409     last_gc_to_space_end_ = to_space_->End();
    410   }
    411 }
    412 
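        // Grow the mark stack while preserving its contents: copy the entries aside, resize the
        // stack, and push the saved entries back.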
    413 void SemiSpace::ResizeMarkStack(size_t new_size) {
    414   std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
    415   CHECK_LE(mark_stack_->Size(), new_size);
    416   mark_stack_->Resize(new_size);
    417   for (const auto& obj : temp) {
    418     mark_stack_->PushBack(obj);
    419   }
    420 }
    421 
    422 inline void SemiSpace::MarkStackPush(Object* obj) {
    423   if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    424     ResizeMarkStack(mark_stack_->Capacity() * 2);
    425   }
    426   // The object must be pushed on to the mark stack.
    427   mark_stack_->PushBack(obj);
    428 }
    429 
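        // memcpy-like copy that assumes the destination memory is already zero-filled: within whole
        // destination pages only non-zero words are written, and pages whose source is entirely
        // zero are left untouched. Returns the number of bytes in pages that were skipped entirely
        // and therefore not dirtied.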
    430 static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
    431   if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    432     // We will dirty the current page and somewhere in the middle of the next page. This means
    433     // that the next object copied will also dirty that page.
    434     // TODO: Worth considering the last object copied? We may end up dirtying one unnecessary
    435     // page per GC.
    436     memcpy(dest, src, size);
    437     return 0;
    438   }
    439   size_t saved_bytes = 0;
    440   byte* byte_dest = reinterpret_cast<byte*>(dest);
    441   if (kIsDebugBuild) {
    442     for (size_t i = 0; i < size; ++i) {
    443       CHECK_EQ(byte_dest[i], 0U);
    444     }
    445   }
    446   // Process the start of the page. The page must already be dirty, so don't bother checking.
    447   const byte* byte_src = reinterpret_cast<const byte*>(src);
    448   const byte* limit = byte_src + size;
    449   size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
    450   // Copy the bytes until the start of the next page.
    451   memcpy(dest, src, page_remain);
    452   byte_src += page_remain;
    453   byte_dest += page_remain;
    454   DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
    455   DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
    456   DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
    457   while (byte_src + kPageSize < limit) {
    458     bool all_zero = true;
    459     uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    460     const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    461     for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
    462       // Assumes the destination of the copy is all zeros.
    463       if (word_src[i] != 0) {
    464         all_zero = false;
    465         word_dest[i] = word_src[i];
    466       }
    467     }
    468     if (all_zero) {
    469       // Avoided copying into the page since it was all zeros.
    470       saved_bytes += kPageSize;
    471     }
    472     byte_src += kPageSize;
    473     byte_dest += kPageSize;
    474   }
    475   // Handle the part of the page at the end.
    476   memcpy(byte_dest, byte_src, limit - byte_src);
    477   return saved_bytes;
    478 }
    479 
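        // Copies an unmarked object out of the from-space and returns its new address. In
        // generational mode, objects allocated before the last GC are promoted into the main free
        // list space; everything else is copied to the to-space, with the non-moving space as a
        // fallback when the preferred space is full.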
    480 mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
    481   const size_t object_size = obj->SizeOf();
    482   size_t bytes_allocated;
    483   mirror::Object* forward_address = nullptr;
    484   if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    485     // If it was allocated before the last GC (older), move
    486     // (pseudo-promote) it to the main free list space (as a sort
    487     // of old generation).
    488     forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
    489                                                            nullptr);
    490     if (UNLIKELY(forward_address == nullptr)) {
    491       // If out of space, fall back to the to-space.
    492       forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    493       // No logic for marking the bitmap, so it must be null.
    494       DCHECK(to_space_live_bitmap_ == nullptr);
    495     } else {
    496       bytes_promoted_ += bytes_allocated;
    497       // Dirty the card at the destination as it may contain
    498       // references (including the class pointer) to the bump pointer
    499       // space.
    500       GetHeap()->WriteBarrierEveryFieldOf(forward_address);
    501       // Handle the bitmaps marking.
    502       accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
    503       DCHECK(live_bitmap != nullptr);
    504       accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    505       DCHECK(mark_bitmap != nullptr);
    506       DCHECK(!live_bitmap->Test(forward_address));
    507       if (collect_from_space_only_) {
    508         // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
    509         DCHECK_EQ(live_bitmap, mark_bitmap);
    510 
    511         // If a bump pointer space only collection, delay the live
    512         // bitmap marking of the promoted object until it's popped off
    513         // the mark stack (ProcessMarkStack()). The rationale: we may
    514         // be in the middle of scanning the objects in the promo
    515         // destination space for
    516         // non-moving-space-to-bump-pointer-space references by
    517         // iterating over the marked bits of the live bitmap
    518         // (MarkReachableObjects()). If we don't delay it (and instead
    519         // mark the promoted object here), the above promo destination
    520         // space scan could encounter the just-promoted object and
    521         // forward the references in the promoted object's fields even
    522         // though it is pushed onto the mark stack. If this happens,
    523         // the promoted object would be in an inconsistent state, that
    524         // is, it's on the mark stack (gray) but its fields are
    525         // already forwarded (black), which would cause a
    526         // DCHECK(!to_space_->HasAddress(obj)) failure below.
    527       } else {
    528         // Mark forward_address on the live bitmap.
    529         live_bitmap->Set(forward_address);
    530         // Mark forward_address on the mark bitmap.
    531         DCHECK(!mark_bitmap->Test(forward_address));
    532         mark_bitmap->Set(forward_address);
    533       }
    534     }
    535   } else {
    536     // If it's allocated after the last GC (younger), copy it to the to-space.
    537     forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    538     if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
    539       to_space_live_bitmap_->Set(forward_address);
    540     }
    541   }
    542   // If it's still null, attempt to use the fallback space.
    543   if (UNLIKELY(forward_address == nullptr)) {
    544     forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
    545                                                          nullptr);
    546     CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    547     accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    548     if (bitmap != nullptr) {
    549       bitmap->Set(forward_address);
    550     }
    551   }
    552   ++objects_moved_;
    553   bytes_moved_ += bytes_allocated;
    554   // Copy over the object and add it to the mark stack since we still need to update its
    555   // references.
    556   saved_bytes_ +=
    557       CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
    558   if (kUseBakerOrBrooksReadBarrier) {
    559     obj->AssertReadBarrierPointer();
    560     if (kUseBrooksReadBarrier) {
    561       DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
    562       forward_address->SetReadBarrierPointer(forward_address);
    563     }
    564     forward_address->AssertReadBarrierPointer();
    565   }
    566   DCHECK(to_space_->HasAddress(forward_address) ||
    567          fallback_space_->HasAddress(forward_address) ||
    568          (generational_ && promo_dest_space_->HasAddress(forward_address)))
    569       << forward_address << "\n" << GetHeap()->DumpSpaces();
    570   return forward_address;
    571 }
    572 
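        // Callback trampolines passed as function pointers to the runtime and accounting code;
        // each one forwards to the SemiSpace instance supplied through the 'arg' parameter.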
    573 void SemiSpace::ProcessMarkStackCallback(void* arg) {
    574   reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
    575 }
    576 
    577 mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
    578   auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
    579   reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
    580   return ref.AsMirrorPtr();
    581 }
    582 
    583 void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
    584                                           void* arg) {
    585   reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
    586 }
    587 
    588 void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
    589                                                void* arg) {
    590   reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
    591 }
    592 
    593 void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
    594                                  RootType /*root_type*/) {
    595   auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    596   reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
    597   if (*root != ref.AsMirrorPtr()) {
    598     *root = ref.AsMirrorPtr();
    599   }
    600 }
    601 
    602 // Marks all objects in the root set.
    603 void SemiSpace::MarkRoots() {
    604   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    605   Runtime::Current()->VisitRoots(MarkRootCallback, this);
    606 }
    607 
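        // Returns true if the referenced object is marked. If the object has been moved, the heap
        // reference is updated in place with its forwarding address.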
    608 bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
    609                                             void* arg) {
    610   mirror::Object* obj = object->AsMirrorPtr();
    611   mirror::Object* new_obj =
    612       reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
    613   if (new_obj == nullptr) {
    614     return false;
    615   }
    616   if (new_obj != obj) {
    617     // Write barrier is not necessary since it still points to the same object, just at a different
    618     // address.
    619     object->Assign(new_obj);
    620   }
    621   return true;
    622 }
    623 
    624 mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
    625   return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
    626 }
    627 
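        // Update or clear weak references held by the runtime (such as interned strings and JNI
        // weak globals) using the marked forwarding addresses.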
    628 void SemiSpace::SweepSystemWeaks() {
    629   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    630   Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
    631 }
    632 
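        // Only spaces other than the semispaces are swept: the from-space is reclaimed wholesale by
        // Clear() and the to-space holds the survivors.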
    633 bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
    634   return space != from_space_ && space != to_space_;
    635 }
    636 
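        // Sweep the unmarked objects in every sweepable continuous space and, unless it is immune,
        // in the large object space.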
    637 void SemiSpace::Sweep(bool swap_bitmaps) {
    638   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    639   DCHECK(mark_stack_->IsEmpty());
    640   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    641     if (space->IsContinuousMemMapAllocSpace()) {
    642       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
    643       if (!ShouldSweepSpace(alloc_space)) {
    644         continue;
    645       }
    646       TimingLogger::ScopedTiming split(
    647           alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
    648       RecordFree(alloc_space->Sweep(swap_bitmaps));
    649     }
    650   }
    651   if (!is_large_object_space_immune_) {
    652     SweepLargeObjects(swap_bitmaps);
    653   }
    654 }
    655 
    656 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
    657   DCHECK(!is_large_object_space_immune_);
    658   TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    659   RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
    660 }
    661 
    662 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
    663 // marked, put it on the appropriate list in the heap for later processing.
    664 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
    665   heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
    666                                                          &HeapReferenceMarkedCallback, this);
    667 }
    668 
    669 class SemiSpaceMarkObjectVisitor {
    670  public:
    671   explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
    672   }
    673 
    674   void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
    675       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    676     // Object was already verified when we scanned it.
    677     collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
    678   }
    679 
    680   void operator()(mirror::Class* klass, mirror::Reference* ref) const
    681       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    682       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    683     collector_->DelayReferenceReferent(klass, ref);
    684   }
    685 
    686  private:
    687   SemiSpace* const collector_;
    688 };
    689 
    690 // Visit all of the references of an object and update them.
    691 void SemiSpace::ScanObject(Object* obj) {
    692   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
    693   SemiSpaceMarkObjectVisitor visitor(this);
    694   obj->VisitReferences<kMovingClasses>(visitor, visitor);
    695 }
    696 
    697 // Scan anything that's on the mark stack.
    698 void SemiSpace::ProcessMarkStack() {
    699   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    700   accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
    701   if (collect_from_space_only_) {
    702     // If this is a bump pointer space only collection (and promotion is
    703     // enabled), we delay the live-bitmap marking of promoted objects
    704     // from MarkObject() until this function.
    705     live_bitmap = promo_dest_space_->GetLiveBitmap();
    706     DCHECK(live_bitmap != nullptr);
    707     accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    708     DCHECK(mark_bitmap != nullptr);
    709     DCHECK_EQ(live_bitmap, mark_bitmap);
    710   }
    711   while (!mark_stack_->IsEmpty()) {
    712     Object* obj = mark_stack_->PopBack();
    713     if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
    714       // obj has just been promoted. Mark the live bitmap for it,
    715       // which is delayed from MarkObject().
    716       DCHECK(!live_bitmap->Test(obj));
    717       live_bitmap->Set(obj);
    718     }
    719     ScanObject(obj);
    720   }
    721 }
    722 
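        // Returns the post-GC address of a marked object: the forwarding address for from-space
        // objects, the object itself otherwise, or nullptr if the object is not marked.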
    723 inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    724     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    725   // All immune objects are assumed marked.
    726   if (from_space_->HasAddress(obj)) {
    727     // Returns either the forwarding address or nullptr.
    728     return GetForwardingAddressInFromSpace(obj);
    729   } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
    730              to_space_->HasAddress(obj)) {
    731     return obj;  // Already forwarded, must be marked.
    732   }
    733   return mark_bitmap_->Test(obj) ? obj : nullptr;
    734 }
    735 
    736 void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
    737   DCHECK(to_space != nullptr);
    738   to_space_ = to_space;
    739 }
    740 
    741 void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
    742   DCHECK(from_space != nullptr);
    743   from_space_ = from_space;
    744 }
    745 
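        // Post-GC cleanup: drop the space pointers, clear the mark bitmaps, and in generational
        // mode decide whether the next collection should be a whole heap collection based on how
        // many bytes have been promoted and how much the large object space has grown.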
    746 void SemiSpace::FinishPhase() {
    747   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    748   // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
    749   // further action is done by the heap.
    750   to_space_ = nullptr;
    751   from_space_ = nullptr;
    752   CHECK(mark_stack_->IsEmpty());
    753   mark_stack_->Reset();
    754   if (generational_) {
    755     // Decide whether to do a whole heap collection or a bump pointer
    756     // space only collection at the next GC by updating
    757     // collect_from_space_only_.
    758     if (collect_from_space_only_) {
    759       // Disable collect_from_space_only_ if the bytes promoted since the
    760       // last whole heap collection or the large object bytes
    761       // allocated exceed a threshold.
    762       bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
    763       bool bytes_promoted_threshold_exceeded =
    764           bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
    765       uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
    766       uint64_t last_los_bytes_allocated =
    767           large_object_bytes_allocated_at_last_whole_heap_collection_;
    768       bool large_object_bytes_threshold_exceeded =
    769           current_los_bytes_allocated >=
    770           last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
    771       if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
    772         collect_from_space_only_ = false;
    773       }
    774     } else {
    775       // Reset the counters.
    776       bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
    777       large_object_bytes_allocated_at_last_whole_heap_collection_ =
    778           GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
    779       collect_from_space_only_ = true;
    780     }
    781   }
    782   // Clear all of the spaces' mark bitmaps.
    783   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    784   heap_->ClearMarkedObjects();
    785 }
    786 
    787 void SemiSpace::RevokeAllThreadLocalBuffers() {
    788   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    789   GetHeap()->RevokeAllThreadLocalBuffers();
    790 }
    791 
    792 }  // namespace collector
    793 }  // namespace gc
    794 }  // namespace art
    795