/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>

#include "garbage_collector.h"

#include "android-base/stringprintf.h"

#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"  // For VLOG_IS_ON.
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

Iteration::Iteration()
    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
  Reset(kGcCauseBackground, false);  // Reset to some placeholder values.
}

void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;
  freed_ = ObjectBytePair();
  freed_los_ = ObjectBytePair();
  freed_bytes_revoke_ = 0;
}

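// Estimated throughput is bytes freed per second of collection time. As a rough illustration
// (hypothetical numbers, not from this file), freeing 8 MiB over a 40 ms iteration reports
// about 200 MiB/s.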
uint64_t Iteration::GetEstimatedThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name),
      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
      is_transaction_active_(false) {
  ResetCumulativeStatistics();
}

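// Records a single pause of the given length (in nanoseconds) against the current iteration.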
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  GetCurrentIteration()->pause_times_.push_back(nano_length);
}

void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  pause_histogram_.Reset();
}

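// Top-level entry point for one collection: resets the current iteration, runs the
// collector-specific phases, then folds the iteration's timings, freed counts, and pause
// times into the cumulative statistics. A minimal sketch of a call site (hypothetical,
// not taken from this file):
//
//   collector->Run(kGcCauseExplicit, /* clear_soft_references= */ false);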
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ScopedTrace trace(android::base::StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()));
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  Iteration* current_iteration = GetCurrentIteration();
  current_iteration->Reset(gc_cause, clear_soft_references);
  // Note: transaction mode is single-threaded and there is no asynchronous GC, so this flag
  // does not change in the middle of a GC.
  is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
  RunPhases();  // Run all the GC phases.
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(*GetTimings());
  // Update cumulative statistics with how many bytes the GC iteration freed.
  total_freed_objects_ += current_iteration->GetFreedObjects() +
      current_iteration->GetFreedLargeObjects();
  total_freed_bytes_ += current_iteration->GetFreedBytes() +
      current_iteration->GetFreedLargeObjectBytes();
  uint64_t end_time = NanoTime();
  current_iteration->SetDurationNs(end_time - start_time);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The entire GC was paused: clear any fake pauses which might be in the pause times and
    // record the whole GC duration as a single pause.
    current_iteration->pause_times_.clear();
    RegisterPause(current_iteration->GetDurationNs());
  }
  total_time_ns_ += current_iteration->GetDurationNs();
  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
    MutexLock mu(self, pause_histogram_lock_);
    pause_histogram_.AdjustAndAddValue(pause_time);
  }
  is_transaction_active_ = false;
}

void GarbageCollector::SwapBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
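  // Discontinuous (large object) spaces keep their own live/mark sets; swap those as well.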
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
    space->SwapBitmaps();
  }
}

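// Mean throughput across all completed iterations: total bytes freed per second of cumulative
// GC time for this collector.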
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

void GarbageCollector::ResetMeasurements() {
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    pause_histogram_.Reset();
  }
  cumulative_timings_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

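// RAII pause scope: suspends all mutator threads for the lifetime of the object, optionally
// notifying the registered GcPauseListener, and records the pause length on destruction.
// A minimal usage sketch (hypothetical, not taken from this file):
//
//   {
//     GarbageCollector::ScopedPause pause(this, /* with_reporting= */ true);
//     // ... work that must run with all mutator threads suspended ...
//   }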
GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
    : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  if (with_reporting) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }
}

GarbageCollector::ScopedPause::~ScopedPause() {
  collector_->RegisterPause(NanoTime() - start_time_);
  Runtime* runtime = Runtime::Current();
  if (with_reporting_) {
    GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

// Returns the current GC iteration and associated info.
Iteration* GarbageCollector::GetCurrentIteration() {
  return heap_->GetCurrentGcIteration();
}
const Iteration* GarbageCollector::GetCurrentIteration() const {
  return heap_->GetCurrentGcIteration();
}

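// Record objects/bytes freed during this iteration and forward them to the heap's global
// accounting; the LOS variant tracks large object space frees separately.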
void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}
void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
  GetCurrentIteration()->freed_los_.Add(freed);
  heap_->RecordFree(freed.objects, freed.bytes);
}

uint64_t GarbageCollector::GetTotalPausedTimeNs() {
  MutexLock mu(Thread::Current(), pause_histogram_lock_);
  return pause_histogram_.AdjustedSum();
}

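// Dumps cumulative timings, the pause-time histogram (with 99% confidence intervals), and the
// total/mean time, freed counts, and throughput for this collector. Prints nothing if the
// collector has not completed any iterations.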
void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
  const CumulativeLogger& logger = GetCumulativeTimings();
  const size_t iterations = logger.GetIterations();
  if (iterations == 0) {
    return;
  }
  os << Dumpable<CumulativeLogger>(logger);
  const uint64_t total_ns = logger.GetTotalNs();
  double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
  const uint64_t freed_bytes = GetTotalFreedBytes();
  const uint64_t freed_objects = GetTotalFreedObjects();
  {
    MutexLock mu(Thread::Current(), pause_histogram_lock_);
    if (pause_histogram_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData cumulative_data;
      pause_histogram_.CreateHistogram(&cumulative_data);
      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
    }
  }
  os << GetName() << " total time: " << PrettyDuration(total_ns)
     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
     << GetName() << " freed: " << freed_objects
     << " objects with total size " << PrettySize(freed_bytes) << "\n"
     << GetName() << " throughput: " << freed_objects / seconds << "/s / "
     << PrettySize(freed_bytes / seconds) << "/s\n";
}

}  // namespace collector
}  // namespace gc
}  // namespace art