/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_

#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

namespace art {

class ArtMethod;
class Thread;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class AllocRecordStackTraceElement {
 public:
  int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);

  AllocRecordStackTraceElement() = default;
  AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
      : method_(method),
        dex_pc_(dex_pc) {}

  ArtMethod* GetMethod() const {
    return method_;
  }

  void SetMethod(ArtMethod* m) {
    method_ = m;
  }

  uint32_t GetDexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

  bool operator==(const AllocRecordStackTraceElement& other) const {
    return method_ == other.method_ && dex_pc_ == other.dex_pc_;
  }

 private:
  ArtMethod* method_ = nullptr;
  uint32_t dex_pc_ = 0;
};

class AllocRecordStackTrace {
 public:
  static constexpr size_t kHashMultiplier = 17;

  AllocRecordStackTrace() = default;

  AllocRecordStackTrace(AllocRecordStackTrace&& r)
      : tid_(r.tid_),
        stack_(std::move(r.stack_)) {}

  AllocRecordStackTrace(const AllocRecordStackTrace& r)
      : tid_(r.tid_),
        stack_(r.stack_) {}

  pid_t GetTid() const {
    return tid_;
  }

  void SetTid(pid_t t) {
    tid_ = t;
  }

  size_t GetDepth() const {
    return stack_.size();
  }

  const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
    DCHECK_LT(index, GetDepth());
    return stack_[index];
  }

  void AddStackElement(const AllocRecordStackTraceElement& element) {
    stack_.push_back(element);
  }

  void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
    DCHECK_LT(index, stack_.size());
    stack_[index].SetMethod(m);
    stack_[index].SetDexPc(dex_pc);
  }

  bool operator==(const AllocRecordStackTrace& other) const {
    if (this == &other) return true;
    return tid_ == other.tid_ && stack_ == other.stack_;
  }

 private:
  pid_t tid_ = 0;
  std::vector<AllocRecordStackTraceElement> stack_;
};
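
// A minimal usage sketch, illustrative only: `tid`, `method`, and `dex_pc` stand in for
// values produced by a stack walk; they are not defined in this header.
//
//   AllocRecordStackTrace trace;
//   trace.SetTid(tid);
//   trace.AddStackElement(AllocRecordStackTraceElement(method, dex_pc));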

struct HashAllocRecordTypes {
  size_t operator()(const AllocRecordStackTraceElement& r) const {
    return std::hash<void*>()(reinterpret_cast<void*>(r.GetMethod())) *
        AllocRecordStackTrace::kHashMultiplier + std::hash<uint32_t>()(r.GetDexPc());
  }

  size_t operator()(const AllocRecordStackTrace& r) const {
    size_t depth = r.GetDepth();
    size_t result = r.GetTid() * AllocRecordStackTrace::kHashMultiplier + depth;
    for (size_t i = 0; i < depth; ++i) {
      result = result * AllocRecordStackTrace::kHashMultiplier + (*this)(r.GetStackElement(i));
    }
    return result;
  }
};

template <typename T> struct HashAllocRecordTypesPtr {
  size_t operator()(const T* r) const {
    if (r == nullptr) return 0;
    return HashAllocRecordTypes()(*r);
  }
};

template <typename T> struct EqAllocRecordTypesPtr {
  bool operator()(const T* r1, const T* r2) const {
    if (r1 == r2) return true;
    if (r1 == nullptr || r2 == nullptr) return false;
    return *r1 == *r2;
  }
};
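
// An illustrative sketch, not part of this header's API: the two functor templates above let a
// standard container key on pointers while hashing and comparing by value, e.g. to deduplicate
// identical stack traces as the TODO in AllocRecord below suggests:
//
//   std::unordered_set<const AllocRecordStackTrace*,
//                      HashAllocRecordTypesPtr<AllocRecordStackTrace>,
//                      EqAllocRecordTypesPtr<AllocRecordStackTrace>> unique_traces;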

class AllocRecord {
 public:
  // All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
  AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace&& trace)
      : byte_count_(count), klass_(klass), trace_(std::move(trace)) {}

  size_t GetDepth() const {
    return trace_.GetDepth();
  }

  const AllocRecordStackTrace* GetStackTrace() const {
    return &trace_;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  pid_t GetTid() const {
    return trace_.GetTid();
  }

  mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return klass_.Read();
  }

  const char* GetClassDescriptor(std::string* storage) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
    return klass_;
  }

  const AllocRecordStackTraceElement& StackElement(size_t index) const {
    return trace_.GetStackElement(index);
  }

 private:
  const size_t byte_count_;
  // klass_ may be a strong or a weak root for the GC, depending on the record's position in the
  // map (see the comment on AllocRecordObjectMap::EntryPair below).
  GcRoot<mirror::Class> klass_;
  // TODO: Share between alloc records with identical stack traces.
  AllocRecordStackTrace trace_;
};

class AllocRecordObjectMap {
 public:
  // GcRoot<mirror::Object> pointers in the list are weak roots, and the last recent_record_max_
  // number of AllocRecord::klass_ pointers are strong roots (the rest of the klass_ pointers are
  // weak roots). The last recent_record_max_ pairs in the list are always kept for DDMS's recent
  // allocation tracking, but the GcRoot<mirror::Object> pointers in these pairs can become null.
  // Both types of pointers need read barriers; do not access them directly.
  using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
  using EntryList = std::list<EntryPair>;

  // The caller must check that tracking is enabled before calling, since we read the stack trace
  // before checking the enabled boolean.
  void RecordAllocation(Thread* self,
                        mirror::Object** obj,
                        size_t byte_count)
      REQUIRES(!Locks::alloc_tracker_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);

  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_);
  ~AllocRecordObjectMap();

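  // Adds a record for obj. Once the list holds alloc_record_max_ entries, the oldest entry is
  // evicted (pop_front) to make room for the new one.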
  void Put(mirror::Object* obj, AllocRecord&& record)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    if (entries_.size() == alloc_record_max_) {
      entries_.pop_front();
    }
    entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
  }
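
  // An illustrative call with placeholder values: a completed trace is moved into the record,
  // which is then moved into the map:
  //
  //   record_map->Put(obj, AllocRecord(byte_count, klass, std::move(trace)));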

  size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.size();
  }

  size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
    CHECK_LE(recent_record_max_, alloc_record_max_);
    size_t sz = entries_.size();
    return std::min(recent_record_max_, sz);
  }

  void VisitRoots(RootVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // Allocation tracking can be enabled by the user between DisallowNewAllocationRecords() and
  // AllowNewAllocationRecords(), in which case new allocation records can be added even though
  // they should be disallowed. However, this is GC-safe because new objects are not processed in
  // this GC cycle. The only downside of not handling this case is that such new allocation
  // records can be swept from the list, and missing the first few records is acceptable when
  // allocation tracking is enabled via the DDMS button.
  void DisallowNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void AllowNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void BroadcastForNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // TODO: Is there a better way to hide the type of entries_?
  EntryList::iterator Begin()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.begin();
  }

  EntryList::iterator End()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.end();
  }

  EntryList::reverse_iterator RBegin()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rbegin();
  }

  EntryList::reverse_iterator REnd()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rend();
  }

  void Clear() REQUIRES(Locks::alloc_tracker_lock_);

 private:
  static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
  static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
  static constexpr size_t kDefaultAllocStackDepth = 16;
  static constexpr size_t kMaxSupportedStackDepth = 128;
  size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
  size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
  size_t max_stack_depth_ = kDefaultAllocStackDepth;
  pid_t alloc_ddm_thread_id_ GUARDED_BY(Locks::alloc_tracker_lock_) = 0;
  bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
  ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
  // See the comment above the EntryList alias.
  EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);

  void SetProperties() REQUIRES(Locks::alloc_tracker_lock_);
};
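
// An illustrative end-to-end flow; the caller shown is an assumption, as this header does not
// name its clients:
//
//   AllocRecordObjectMap::SetAllocTrackingEnabled(true);
//   // For each allocation, a caller that has checked that tracking is enabled does:
//   record_map->RecordAllocation(self, &obj, byte_count);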

}  // namespace gc
}  // namespace art
#endif  // ART_RUNTIME_GC_ALLOCATION_RECORD_H_