// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_register.h"

#include <algorithm>
#include <limits>

#include "base/trace_event/trace_event_memory_overhead.h"

namespace base {
namespace trace_event {

AllocationRegister::ConstIterator::ConstIterator(
    const AllocationRegister& alloc_register,
    AllocationIndex index)
    : register_(alloc_register), index_(index) {}

void AllocationRegister::ConstIterator::operator++() {
  index_ = register_.allocations_.Next(index_ + 1);
}

bool AllocationRegister::ConstIterator::operator!=(
    const ConstIterator& other) const {
  return index_ != other.index_;
}

AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
    const {
  return register_.GetAllocation(index_);
}

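// Computes a cheap hash of a backtrace: the pointer values of at most
// |kSampleLength| frames from the head and from the tail of the stack are
// summed together with the frame count, and the sum is finished with a
// multiplicative hash.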
size_t AllocationRegister::BacktraceHasher::operator()(
    const Backtrace& backtrace) const {
  const size_t kSampleLength = 10;

  uintptr_t total_value = 0;

  size_t head_end = std::min(backtrace.frame_count, kSampleLength);
  for (size_t i = 0; i != head_end; ++i) {
    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
  }

  size_t tail_start = backtrace.frame_count -
                      std::min(backtrace.frame_count - head_end, kSampleLength);
  for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
  }

  total_value += backtrace.frame_count;

  // These magic constants give best results in terms of average collisions
  // per backtrace. They were found by replaying real backtraces from Linux
  // and Android against different hash functions.
  return (total_value * 131101) >> 14;
}

size_t AllocationRegister::AddressHasher::operator()(
    const void* address) const {
  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
  // been chosen carefully based on measurements with real-world data
  // (addresses recorded from a Chrome trace run). It is the first prime after
  // 2^17. For |shift|, 15 yields good results for both 2^18 and 2^19 bucket
  // sizes. Microbenchmarks show that this simple scheme outperforms fancy
  // hashes like Murmur3 by 20 to 40 percent.
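  // For illustration (hypothetical value): an address whose bits are 0x1000
  // hashes to (0x1000 * 131101) >> 15 == 16387; the hash map presumably
  // reduces this further modulo its bucket count.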
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;
  const uintptr_t shift = 15;
  const uintptr_t h = (key * a) >> shift;
  return h;
}

AllocationRegister::AllocationRegister()
    : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}

AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                       size_t backtrace_capacity)
    : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
  Backtrace sentinel = {};
  sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
  sentinel.frame_count = 1;

  // Rationale for max / 2: in theory we could just start the sentinel with a
  // refcount == 0. However, using max / 2 allows short circuiting of the
  // conditional in RemoveBacktrace(), keeping the sentinel logic out of the
  // fast path. From a functional viewpoint, the sentinel is safe even if we
  // wrap over refcount, because removal is additionally guarded by the
  // |index != kOutOfStorageBacktraceIndex| check in RemoveBacktrace().
  BacktraceMap::KVPair::second_type sentinel_refcount =
      std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
  auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
  DCHECK(index_and_flag.second);
  DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}

AllocationRegister::~AllocationRegister() {}

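// Usage sketch (illustrative caller code, not part of this file):
//
//   AllocationRegister reg;
//   reg.Insert(ptr, size, context);   // record an allocation
//   reg.Remove(ptr);                  // record the matching free
//   for (const AllocationRegister::Allocation& alloc : reg)
//     total_size += alloc.size;
//
// Insert() returns false when |size| is zero or when the allocation map is
// full; re-inserting an existing |address| overwrites its allocation info.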
bool AllocationRegister::Insert(const void* address,
                                size_t size,
                                const AllocationContext& context) {
  DCHECK(address != nullptr);
  if (size == 0) {
    return false;
  }

  AllocationInfo info = {size, context.type_name,
                         InsertBacktrace(context.backtrace)};

  // Try to insert the allocation.
  auto index_and_flag = allocations_.Insert(address, info);
  if (!index_and_flag.second &&
      index_and_flag.first != AllocationMap::kInvalidKVIndex) {
    // |address| is already there - overwrite the allocation info.
    auto& old_info = allocations_.Get(index_and_flag.first).second;
    RemoveBacktrace(old_info.backtrace_index);
    old_info = info;
    return true;
  }

  return index_and_flag.second;
}

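// Removes the allocation recorded at |address|, if any, dropping one
// reference to its interned backtrace.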
void AllocationRegister::Remove(const void* address) {
  auto index = allocations_.Find(address);
  if (index == AllocationMap::kInvalidKVIndex) {
    return;
  }

  const AllocationInfo& info = allocations_.Get(index).second;
  RemoveBacktrace(info.backtrace_index);
  allocations_.Remove(index);
}

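// Looks up |address|; when found, copies the allocation into |out_allocation|
// (which may be null) and returns true.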
bool AllocationRegister::Get(const void* address,
                             Allocation* out_allocation) const {
  auto index = allocations_.Find(address);
  if (index == AllocationMap::kInvalidKVIndex) {
    return false;
  }

  if (out_allocation) {
    *out_allocation = GetAllocation(index);
  }
  return true;
}

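// Iteration visits every live allocation: begin() seeks the first occupied
// slot in the map and end() is marked by kInvalidKVIndex.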
AllocationRegister::ConstIterator AllocationRegister::begin() const {
  return ConstIterator(*this, allocations_.Next(0));
}

AllocationRegister::ConstIterator AllocationRegister::end() const {
  return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}

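// Reports this register's footprint for memory-infra accounting: |allocated|
// covers only the object itself, while |resident| also includes the memory
// used by the two maps.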
void AllocationRegister::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) const {
  size_t allocated = sizeof(AllocationRegister);
  size_t resident = sizeof(AllocationRegister) +
                    allocations_.EstimateUsedMemory() +
                    backtraces_.EstimateUsedMemory();
  overhead->Add("AllocationRegister", allocated, resident);
}

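// Interns |backtrace|: inserts it with a refcount of zero if not already
// present, then increments the refcount of the (new or existing) entry.
// Falls back to the out-of-storage sentinel when the map is full.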
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
    const Backtrace& backtrace) {
  auto index = backtraces_.Insert(backtrace, 0).first;
  if (index == BacktraceMap::kInvalidKVIndex)
    return kOutOfStorageBacktraceIndex;
  auto& backtrace_and_count = backtraces_.Get(index);
  backtrace_and_count.second++;
  return index;
}

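// Drops one reference to the backtrace at |index| and frees the entry once it
// is no longer referenced. The sentinel's inflated refcount means the first
// condition short-circuits on the fast path, and the sentinel itself is never
// removed.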
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
  auto& backtrace_and_count = backtraces_.Get(index);
  if (--backtrace_and_count.second == 0 &&
      index != kOutOfStorageBacktraceIndex) {
    // Backtrace is not referenced anymore - remove it.
    backtraces_.Remove(index);
  }
}

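// Reassembles an Allocation from the stored address, size, type name and
// interned backtrace.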
AllocationRegister::Allocation AllocationRegister::GetAllocation(
    AllocationMap::KVIndex index) const {
  const auto& address_and_info = allocations_.Get(index);
  const auto& backtrace_and_count =
      backtraces_.Get(address_and_info.second.backtrace_index);
  return {address_and_info.first, address_and_info.second.size,
          AllocationContext(backtrace_and_count.first,
                            address_and_info.second.type_name)};
}

}  // namespace trace_event
}  // namespace base