/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"

#include <android-base/properties.h>

namespace art {
namespace gc {

int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
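  // entries_ is in insertion order, so reverse iteration visits the newest records first.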
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted; the most recent
  // recent_record_max_ records are always kept.
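  // The std::max guards against unsigned underflow when entries_.size() < recent_record_max_.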
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
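        // One of the recent_record_max_ newest records: keep the record but clear the dead
        // object root.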
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

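// Note: with a read barrier collector (kUseReadBarrier), RecordAllocation gates new records on
// the thread's weak-ref-access flag instead of allow_new_record_, so the Allow/Disallow pair
// below must not be called in that configuration.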
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
      std::string self_name;
      self->GetThreadName(self_name);
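      // Remember the DDM (JDWP) thread's id so that its own allocations are skipped in
      // RecordAllocation.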
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
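      // Rough per-record footprint: the record, its stack trace object, and up to
      // max_stack_depth_ trace elements; used only for the informational log below.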
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations like the runtime
    // shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
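    // Wrap obj in a handle so the caller's pointer is updated if a GC moves the object during
    // the stack walk.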
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
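          // Returning false from the callback stops the walk once max_stack_depth_ frames have
          // been collected.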
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Do not record for DDM thread.
  if (alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for GC's sweeping to complete and allow new records.
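  // Without a read barrier collector this is gated by allow_new_record_; with a read barrier
  // collector it is gated by the thread's weak-ref-access flag.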
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking was disabled while we were waiting for weak ref access above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Tag the trace with the allocating thread's id.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art