/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"

#ifdef ART_TARGET_ANDROID
#include "cutils/properties.h"
#endif

namespace art {
namespace gc {

int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

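// Reads the dalvik.vm.allocTrackerMax, dalvik.vm.recentAllocMax and debug.allocTracker.stackDepth
// system properties to override the defaults; called under alloc_tracker_lock_ before tracking
// is enabled (see SetAllocTrackingEnabled below).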
void AllocRecordObjectMap::SetProperties() {
#ifdef ART_TARGET_ANDROID
  // Check whether there's a system property overriding the max number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
                 << "' --- invalid";
    } else {
      alloc_record_max_ = value;
      if (recent_record_max_ > value) {
        recent_record_max_ = value;
      }
    }
  }
  // Check whether there's a system property overriding the number of recent records.
  propertyName = "dalvik.vm.recentAllocMax";
  char recentAllocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(recentAllocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- invalid";
    } else if (value > alloc_record_max_) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- must not exceed " << alloc_record_max_;
    } else {
      recent_record_max_ = value;
    }
  }
  // Check whether there's a system property overriding the max stack trace depth.
  propertyName = "debug.allocTracker.stackDepth";
  char stackDepthString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, stackDepthString, "") > 0) {
    char* end;
    size_t value = strtoul(stackDepthString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
                 << "' --- invalid";
    } else if (value > kMaxSupportedStackDepth) {
      LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
                   << kMaxSupportedStackDepth;
      max_stack_depth_ = kMaxSupportedStackDepth;
    } else {
      max_stack_depth_ = value;
    }
  }
#endif  // ART_TARGET_ANDROID
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the newest recent_record_max_ allocation records in entries_ and mark their
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records may be deleted; the std::max guards
  // against underflow when there are fewer than recent_record_max_ entries.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
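      // The object is dead. Records among the newest recent_record_max_ entries (count past
      // delete_bound) are kept with a nulled object so the recent-allocation list stays complete;
      // older records are erased outright.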
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

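// The allow/disallow mechanism below is only used by non-read-barrier collectors; with the read
// barrier (concurrent copying) collector, weak-ref access is gated per thread instead (see the
// wait loop in RecordAllocation), hence the CHECK(!kUseReadBarrier).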
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

class AllocRecordStackVisitor : public StackVisitor {
 public:
  AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        max_depth_(max_depth),
        trace_(trace_out) {}

  // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
  // confuses annotalysis.
  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (trace_->GetDepth() >= max_depth_) {
      return false;
    }
    ArtMethod* m = GetMethod();
    // m may be null if we have inlined methods of unresolved classes. b/27858645
    if (m != nullptr && !m->IsRuntimeMethod()) {
      m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
      trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
    }
    return true;
  }

 private:
  const size_t max_depth_;
  AllocRecordStackTrace* const trace_;
};

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
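      // Remember the DDM/JDWP thread's id so its own allocations are skipped in RecordAllocation.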
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
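      // Rough per-record memory footprint, used only for the log message below.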
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
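    // Instrument the allocation entry points outside the lock; presumably this is done because
    // installing the instrumented entry points may suspend threads.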
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Uninstrument outside of the critical section to avoid possible lock violations like the
    // runtime shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get the stack trace outside of the lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
  {
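    // NewHandleWrapper keeps *obj up to date if the GC moves the object while the stack walk
    // suspends this thread.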
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);
    visitor.WalkStack();
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Do not record allocations made by the DDM thread itself.
  if (alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for the GC's sweeping to complete and for new records to be allowed.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking was disabled while waiting for system weak access above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Stamp the trace with the allocating thread's id.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art