/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <sstream>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
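// These two thresholds drive the generational (GSS) heuristic in FinishPhase(): once either is
// crossed, the collector stops doing bump pointer space only collections and schedules a
// whole-heap collection.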
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      immune_spaces_.AddSpace(space);
    } else if (space->GetLiveBitmap() != nullptr) {
      // TODO: We can probably also add this space to the immune region.
      if (space == to_space_ || collect_from_space_only_) {
        if (collect_from_space_only_) {
          // Bind the bitmaps of the main free list space and the non-moving space if we are doing
          // a bump pointer space only collection.
          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
                space == GetHeap()->GetNonMovingSpace());
        }
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
  if (collect_from_space_only_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
      mark_stack_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      mark_bitmap_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      collect_from_space_only_(generational),
      promo_dest_space_(nullptr),
      fallback_space_(nullptr),
      bytes_moved_(0U),
      objects_moved_(0U),
      saved_bytes_(0U),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

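// Top-level driver: runs the initialize, marking, reclaim and finish phases. The marking phase
// needs the mutators suspended, so RunPhases() either reuses an already exclusively held mutator
// lock (zygote creation, collector transitions) or takes its own ScopedPause.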
void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // The semi-space collector is special since it is sometimes called with the mutators suspended
  // during the zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since that would cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (generational_) {
    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
  }
  fallback_space_ = GetHeap()->GetNonMovingSpace();
}

void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If this is an explicit, native allocation-triggered, or last-attempt collection,
      // collect the whole heap.
      collect_from_space_only_ = false;
    }
    if (!collect_from_space_only_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!collect_from_space_only_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if doing a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space used by GSS.
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (!from_space_->IsRosAllocSpace()) {
      // Protect with PROT_NONE.
      VLOG(heap) << "Protecting from_space_ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    } else {
      // If RosAllocSpace, we'll leave it as PROT_READ here so the
      // rosalloc verification can read the metadata magic number and
      // protect it with PROT_NONE later in FinishPhase().
      VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_READ);
    }
  }
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

// Used to verify that there are no references to the from-space.
class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
 public:
  explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      LOG(FATAL) << ref << " found in from space";
    }
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences(visitor, VoidFunctor());
}

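// Marks everything transitively reachable from the roots. After marking the allocation stack as
// live, the spaces that are not being collected are scanned through their mod-union tables,
// remembered sets, or live bitmaps (depending on what each space provides), the immune large
// object space is scanned directly, and the rest is handled by recursively draining the mark
// stack.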
void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
                                   GetTimings());
      table->UpdateAndMarkReferences(this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
               space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space, app image spaces, and the main
      // spaces when the bump pointer space only collection is enabled), then we need to scan its
      // live bitmap or dirty cards as roots (including the objects on the live stack which have
      // just been marked in the live bitmap above in MarkAllocStackAsLive()).
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      if (!space->IsImageSpace()) {
        DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "collect_from_space_only_=" << collect_from_space_only_;
        // App images currently do not have remembered sets.
        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
      } else {
        DCHECK(rem_set == nullptr);
      }
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(from_space_, this);
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](mirror::Object* obj)
           REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
          ScanObject(obj);
        });
      }
      if (kIsDebugBuild) {
        // Verify that there are no from-space references that
        // remain in the space, that is, the remembered set (and the
        // card table) didn't miss any from-space references in the
        // space.
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      [this](Object* obj)
            REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
          DCHECK(obj != nullptr);
          VerifyNoFromSpaceReferences(obj);
        });
      }
    }
  }

  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (is_large_object_space_immune_ && los != nullptr) {
    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
    DCHECK(collect_from_space_only_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    los->CopyLiveToMarked();

    // When the large object space is immune, we still need to scan the large objects as roots
    // since they contain references to their classes (primitive array classes) that could move,
    // even though they don't contain any other references.
    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
                                        reinterpret_cast<uintptr_t>(range.second),
                                        [this](mirror::Object* obj)
        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
      ScanObject(obj);
    });
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an optimization that
  // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

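// Copies |size| bytes from |src| to |dest| while trying not to dirty destination pages that would
// stay all-zero: whole pages in the middle of the copy are scanned word by word and only non-zero
// words are written. The destination is assumed to be zero-filled, which the debug-build check
// below verifies. Returns the number of bytes whose writes were avoided; the caller only uses
// this for logging how much dirtying was saved.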
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  uint8_t* byte_dest = reinterpret_cast<uint8_t*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, so don't bother checking.
  const uint8_t* byte_src = reinterpret_cast<const uint8_t*>(src);
  const uint8_t* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

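// Allocates a destination for |obj| and copies it there. In generational mode, objects that
// survived the previous GC (addresses below last_gc_to_space_end_) are pseudo-promoted into the
// main free list space; everything else goes to the to-space, with the non-moving space as a
// last-resort fallback. Installing the forwarding address and pushing the copy onto the mark
// stack are handled by the inlined caller (see MarkObject() in semi_space-inl.h).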
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated, dummy;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as a sort
    // of old generation).
    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                           nullptr, &dummy);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                     &dummy);
      // No logic for marking the bitmap, so it must be null.
      DCHECK(to_space_live_bitmap_ == nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (collect_from_space_only_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
                                                   &dummy);
    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
      to_space_live_bitmap_->Set(forward_address);
    }
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr, &dummy);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerReadBarrier) {
    obj->AssertReadBarrierState();
    forward_address->AssertReadBarrierState();
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         fallback_space_->HasAddress(forward_address) ||
         (generational_ && promo_dest_space_->HasAddress(forward_address)))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  MarkObjectIfNotInToSpace(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(obj_ptr);
}

void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    auto* root = roots[i];
    auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
    // The root can be in the to-space since we may visit the declaring class of an ArtMethod
    // multiple times if it is on the call stack.
    MarkObjectIfNotInToSpace(&ref);
    if (*root != ref.AsMirrorPtr()) {
      *root = ref.AsMirrorPtr();
    }
  }
}

void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectIfNotInToSpace(roots[i]);
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(this);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

class SemiSpace::MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

  // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kIsDebugBuild) {
      Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    // We may visit the same root multiple times, so avoid marking things in the to-space since
    // this is not handled by the GC.
    collector_->MarkObjectIfNotInToSpace(root);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkObjectVisitor visitor(this);
  // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.)
  obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
      visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (collect_from_space_only_) {
    // If this is a bump pointer space only collection (and promotion is enabled), we delay the
    // live-bitmap marking of promoted objects from MarkObject() until this function.
    live_bitmap = promo_dest_space_->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

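// Returns the address the object is now known at, or null if it is not marked: from-space objects
// resolve through their forwarding address, objects in immune spaces or the to-space (and, during
// a from-space-only collection, anything outside the from-space) are treated as marked, and
// everything else is looked up in the mark bitmap.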
mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
  // All immune objects are assumed marked.
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or null.
    return GetForwardingAddressInFromSpace(obj);
  } else if (collect_from_space_only_ ||
             immune_spaces_.IsInImmuneRegion(obj) ||
             to_space_->HasAddress(obj)) {
    return obj;  // Already forwarded, must be marked.
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                            // SemiSpace does the GC in a pause. No CAS needed.
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = object->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  mirror::Object* new_obj = IsMarked(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // b/31172841. Temporarily disable the from-space protection with host debug build
  // due to some protection issue in the build server.
  if (kProtectFromSpace && !(kIsDebugBuild && !kIsTargetBuild)) {
    if (from_space_->IsRosAllocSpace()) {
      VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
      from_space_->GetMemMap()->Protect(PROT_NONE);
    }
  }
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // collect_from_space_only_.
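    // With the default thresholds above, this means: once at least 4 MB have been promoted since
    // the last whole-heap collection, or at least 16 MB of new large objects have been allocated
    // since then, the next collection is upgraded to a whole-heap collection.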
    if (collect_from_space_only_) {
      // Disable collect_from_space_only_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        collect_from_space_only_ = false;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          los != nullptr ? los->GetBytesAllocated() : 0U;
      collect_from_space_only_ = true;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art