      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 // ---
      6 // Author: Sainbayar Sukhbaatar
      7 //         Dai Mikurube
      8 //
      9 
     10 #include "deep-heap-profile.h"
     11 
     12 #ifdef USE_DEEP_HEAP_PROFILE
     13 #include <algorithm>
     14 #include <fcntl.h>
     15 #include <sys/stat.h>
     16 #include <sys/types.h>
     17 #include <time.h>
     18 #ifdef HAVE_UNISTD_H
     19 #include <unistd.h>  // for getpagesize and getpid
     20 #endif  // HAVE_UNISTD_H
     21 
     22 #if defined(__linux__)
     23 #include <endian.h>
     24 #if !defined(__LITTLE_ENDIAN__) and !defined(__BIG_ENDIAN__)
     25 #if __BYTE_ORDER == __BIG_ENDIAN
     26 #define __BIG_ENDIAN__
     27 #endif  // __BYTE_ORDER == __BIG_ENDIAN
     28 #endif  // !defined(__LITTLE_ENDIAN__) and !defined(__BIG_ENDIAN__)
     29 #if defined(__BIG_ENDIAN__)
     30 #include <byteswap.h>
     31 #endif  // defined(__BIG_ENDIAN__)
     32 #endif  // defined(__linux__)
     33 
     34 #include "base/cycleclock.h"
     35 #include "base/sysinfo.h"
     36 #include "internal_logging.h"  // for ASSERT, etc
     37 
     38 static const int kProfilerBufferSize = 1 << 20;
     39 static const int kHashTableSize = 179999;  // Same as heap-profile-table.cc.
     40 
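        // Both /proc/<pid>/pagemap and /proc/kpagecount expose one 64-bit entry
        // (8 bytes) per page, so the entry for a page lives at
        // <page index> * 8 in either file.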
     41 static const int PAGEMAP_BYTES = 8;
     42 static const int KPAGECOUNT_BYTES = 8;
     43 static const uint64 MAX_ADDRESS = kuint64max;
     44 
     45 // Tag strings in heap profile dumps.
     46 static const char kProfileHeader[] = "heap profile: ";
     47 static const char kProfileVersion[] = "DUMP_DEEP_6";
     48 static const char kMetaInformationHeader[] = "META:\n";
     49 static const char kMMapListHeader[] = "MMAP_LIST:\n";
     50 static const char kGlobalStatsHeader[] = "GLOBAL_STATS:\n";
     51 static const char kStacktraceHeader[] = "STACKTRACES:\n";
     52 static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
     53 
     54 static const char kVirtualLabel[] = "virtual";
     55 static const char kCommittedLabel[] = "committed";
     56 
     57 #if defined(__linux__)
     58 
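        // Reads /proc/<pid>/cmdline, replaces its NUL separators with spaces and
        // appends the result to |buffer| as a "CommandLine:" line.  Returns false
        // if the file cannot be opened or read.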
     59 bool DeepHeapProfile::AppendCommandLine(TextBuffer* buffer) {
     60   RawFD fd;
     61   char filename[100];
     62   char cmdline[4096];
     63   snprintf(filename, sizeof(filename), "/proc/%d/cmdline",
     64            static_cast<int>(getpid()));
     65   fd = open(filename, O_RDONLY);
     66   if (fd == kIllegalRawFD) {
     67     RAW_LOG(0, "Failed to open /proc/self/cmdline");
     68     return false;
     69   }
     70 
     71   ssize_t length = read(fd, cmdline, sizeof(cmdline) - 1);
     72   close(fd);
          if (length < 0)
            return false;
     73 
     74   for (ssize_t i = 0; i < length; ++i)
     75     if (cmdline[i] == '\0')
     76       cmdline[i] = ' ';
     77   cmdline[length] = '\0';
     78 
     79   buffer->AppendString("CommandLine: ", 0);
     80   buffer->AppendString(cmdline, 0);
     81   buffer->AppendChar('\n');
     82 
     83   return true;
     84 }
     85 
     86 #else  // defined(__linux__)
     87 
     88 bool DeepHeapProfile::AppendCommandLine(TextBuffer* buffer) {
     89   return false;
     90 }
     91 
     92 #endif  // defined(__linux__)
     93 
     94 #if defined(__linux__)
     95 
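        // Opens /proc/<pid>/pagemap, plus /proc/kpagecount when page counts
        // (DUMP_PAGECOUNT) are requested.  /proc/kpagecount is usually readable
        // only by privileged processes, so failing to open it is not fatal.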
     96 void DeepHeapProfile::MemoryInfoGetterLinux::Initialize() {
     97   char filename[100];
     98   snprintf(filename, sizeof(filename), "/proc/%d/pagemap",
     99            static_cast<int>(getpid()));
    100   pagemap_fd_ = open(filename, O_RDONLY);
    101   RAW_CHECK(pagemap_fd_ != -1, "Failed to open /proc/self/pagemap");
    102 
    103   if (pageframe_type_ == DUMP_PAGECOUNT) {
    104     snprintf(filename, sizeof(filename), "/proc/kpagecount");
    106     kpagecount_fd_ = open(filename, O_RDONLY);
    107     if (kpagecount_fd_ == -1)
    108       RAW_LOG(0, "Failed to open /proc/kpagecount");
    109   }
    110 }
    111 
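        // Returns how many bytes of [first_address, last_address] are backed by
        // committed (present or swapped) pages.  When pageframe dumping is
        // enabled and |buffer| is non-NULL, it also appends a "  PF:" list of the
        // page frame numbers (and optionally mapping counts) of resident pages.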
    112 size_t DeepHeapProfile::MemoryInfoGetterLinux::CommittedSize(
    113     uint64 first_address,
    114     uint64 last_address,
    115     DeepHeapProfile::TextBuffer* buffer) const {
    116   int page_size = getpagesize();
    117   uint64 page_address = (first_address / page_size) * page_size;
    118   size_t committed_size = 0;
    119   size_t pageframe_list_length = 0;
    120 
    121   Seek(first_address);
    122 
    123   // Check every page on which the allocation resides.
    124   while (page_address <= last_address) {
    125     // Read corresponding physical page.
    126     State state;
    127     // TODO(dmikurube): Read pagemap in bulk for speed.
    128     // TODO(dmikurube): Consider using mincore(2).
    129     if (Read(&state, pageframe_type_ != DUMP_NO_PAGEFRAME) == false) {
    130       // We can't read the last region (e.g., vsyscall).
    131 #ifndef NDEBUG
    132       RAW_LOG(0, "pagemap read failed @ %#" PRIx64 " %" PRId64 " bytes",
    133               first_address, last_address - first_address + 1);
    134 #endif
    135       return 0;
    136     }
    137 
    138     // Dump pageframes of resident pages.  Non-resident pages are just skipped.
    139     if (pageframe_type_ != DUMP_NO_PAGEFRAME &&
    140         buffer != NULL && state.pfn != 0) {
    141       if (pageframe_list_length == 0) {
    142         buffer->AppendString("  PF:", 0);
    143         pageframe_list_length = 5;
    144       }
    145       buffer->AppendChar(' ');
    146       if (page_address < first_address)
    147         buffer->AppendChar('<');
    148       buffer->AppendBase64(state.pfn, 4);
    149       pageframe_list_length += 5;
    150       if (pageframe_type_ == DUMP_PAGECOUNT && IsPageCountAvailable()) {
    151         uint64 pagecount = ReadPageCount(state.pfn);
    152         // Assume pagecount == 63 if the pageframe is mapped more than 63 times.
    153         if (pagecount > 63)
    154           pagecount = 63;
    155         buffer->AppendChar('#');
    156         buffer->AppendBase64(pagecount, 1);
    157         pageframe_list_length += 2;
    158       }
    159       if (last_address < page_address - 1 + page_size)
    160         buffer->AppendChar('>');
    161       // Begins a new line every 94 characters.
    162       if (pageframe_list_length > 94) {
    163         buffer->AppendChar('\n');
    164         pageframe_list_length = 0;
    165       }
    166     }
    167 
    168     if (state.is_committed) {
    169       // Calculate the size of the allocation part in this page.
    170       size_t bytes = page_size;
    171 
    172       // If looking at the last page in a given region.
    173       if (last_address <= page_address - 1 + page_size) {
    174         bytes = last_address - page_address + 1;
    175       }
    176 
    177       // If looking at the first page in a given region.
    178       if (page_address < first_address) {
    179         bytes -= first_address - page_address;
    180       }
    181 
    182       committed_size += bytes;
    183     }
    184     if (page_address > MAX_ADDRESS - page_size) {
    185       break;
    186     }
    187     page_address += page_size;
    188   }
    189 
    190   if (pageframe_type_ != DUMP_NO_PAGEFRAME &&
    191       buffer != NULL && pageframe_list_length != 0) {
    192     buffer->AppendChar('\n');
    193   }
    194 
    195   return committed_size;
    196 }
    197 
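        // Returns how many times the physical page |pfn| is mapped, read from
        // /proc/kpagecount.  Returns 0 when the entry cannot be read.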
    198 uint64 DeepHeapProfile::MemoryInfoGetterLinux::ReadPageCount(uint64 pfn) const {
    199   int64 index = pfn * KPAGECOUNT_BYTES;
    200   int64 offset = lseek64(kpagecount_fd_, index, SEEK_SET);
    201   RAW_DCHECK(offset == index, "Failed to seek in /proc/kpagecount.");
    202 
    203   uint64 kpagecount_value;
    204   int result = read(kpagecount_fd_, &kpagecount_value, KPAGECOUNT_BYTES);
    205   if (result != KPAGECOUNT_BYTES)
    206     return 0;
    207 
    208   return kpagecount_value;
    209 }
    210 
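        // Positions /proc/<pid>/pagemap at the entry for the page containing
        // |address|.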
    211 bool DeepHeapProfile::MemoryInfoGetterLinux::Seek(uint64 address) const {
    212   int64 index = (address / getpagesize()) * PAGEMAP_BYTES;
    213   RAW_DCHECK(pagemap_fd_ != -1, "Failed to seek in /proc/self/pagemap");
    214   int64 offset = lseek64(pagemap_fd_, index, SEEK_SET);
    215   RAW_DCHECK(offset == index, "Failed to seek in /proc/self/pagemap.");
    216   return offset >= 0;
    217 }
    218 
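        // Reads one 64-bit pagemap entry at the current offset into |state|.
        // Per the kernel's pagemap documentation (Documentation/vm/pagemap.txt),
        // bit 63 means "page present", bit 62 "page swapped", and bits 0-54 hold
        // the page frame number.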
    219 bool DeepHeapProfile::MemoryInfoGetterLinux::Read(
    220     State* state, bool get_pfn) const {
    221   static const uint64 U64_1 = 1;
    222   static const uint64 PFN_FILTER = (U64_1 << 55) - U64_1;
    223   static const uint64 PAGE_PRESENT = U64_1 << 63;
    224   static const uint64 PAGE_SWAP = U64_1 << 62;
    225   static const uint64 PAGE_RESERVED = U64_1 << 61;
    226   static const uint64 FLAG_NOPAGE = U64_1 << 20;
    227   static const uint64 FLAG_KSM = U64_1 << 21;
    228   static const uint64 FLAG_MMAP = U64_1 << 11;
    229 
    230   uint64 pagemap_value;
    231   RAW_DCHECK(pagemap_fd_ != -1, "Failed to read from /proc/self/pagemap");
    232   int result = read(pagemap_fd_, &pagemap_value, PAGEMAP_BYTES);
    233   if (result != PAGEMAP_BYTES) {
    234     return false;
    235   }
    236 
    237   // Check if the page is committed.
    238   state->is_committed = (pagemap_value & (PAGE_PRESENT | PAGE_SWAP));
    239 
    240   state->is_present = (pagemap_value & PAGE_PRESENT);
    241   state->is_swapped = (pagemap_value & PAGE_SWAP);
    242   state->is_shared = false;
    243 
    244   if (get_pfn && state->is_present && !state->is_swapped)
    245     state->pfn = (pagemap_value & PFN_FILTER);
    246   else
    247     state->pfn = 0;
    248 
    249   return true;
    250 }
    251 
    252 bool DeepHeapProfile::MemoryInfoGetterLinux::IsPageCountAvailable() const {
    253   return kpagecount_fd_ != -1;
    254 }
    255 
    256 #endif  // defined(__linux__)
    257 
    258 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
    259     MemoryResidenceInfoGetterInterface() {}
    260 
    261 DeepHeapProfile::MemoryResidenceInfoGetterInterface::
    262     ~MemoryResidenceInfoGetterInterface() {}
    263 
    264 DeepHeapProfile::MemoryResidenceInfoGetterInterface*
    265     DeepHeapProfile::MemoryResidenceInfoGetterInterface::Create(
    266         PageFrameType pageframe_type) {
    267 #if defined(__linux__)
    268   return new MemoryInfoGetterLinux(pageframe_type);
    269 #else
    270   return NULL;
    271 #endif
    272 }
    273 
    274 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
    275                                  const char* prefix,
    276                                  enum PageFrameType pageframe_type)
    277     : memory_residence_info_getter_(
    278           MemoryResidenceInfoGetterInterface::Create(pageframe_type)),
    279       most_recent_pid_(-1),
    280       stats_(),
    281       dump_count_(0),
    282       filename_prefix_(NULL),
    283       deep_table_(kHashTableSize, heap_profile->alloc_, heap_profile->dealloc_),
    284       pageframe_type_(pageframe_type),
    285       heap_profile_(heap_profile) {
    286   // Copy filename prefix.
    287   const int prefix_length = strlen(prefix);
    288   filename_prefix_ =
    289       reinterpret_cast<char*>(heap_profile_->alloc_(prefix_length + 1));
    290   memcpy(filename_prefix_, prefix, prefix_length);
    291   filename_prefix_[prefix_length] = '\0';
    292 
    293   strncpy(run_id_, "undetermined-run-id", sizeof(run_id_));
    294 }
    295 
    296 DeepHeapProfile::~DeepHeapProfile() {
    297   heap_profile_->dealloc_(filename_prefix_);
    298   delete memory_residence_info_getter_;
    299 }
    300 
    301 // Global malloc() should not be used in this function.
    302 // Use LowLevelAlloc if required.
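        //
        // Writes one ordered dump (version DUMP_DEEP_6) to |fd|: meta
        // information, the mmap list, the global stats and the stack traces.
        // The bucket listing goes to a separate
        // "<filename_prefix_>.<pid>.<dump_count>.buckets" file, and the first
        // dump for a process also writes "<filename_prefix_>.<pid>.maps".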
    303 void DeepHeapProfile::DumpOrderedProfile(const char* reason,
    304                                          char raw_buffer[],
    305                                          int buffer_size,
    306                                          RawFD fd) {
    307   TextBuffer buffer(raw_buffer, buffer_size, fd);
    308 
    309 #ifndef NDEBUG
    310   int64 starting_cycles = CycleClock::Now();
    311 #endif
    312 
    313   // Get the time before starting snapshot.
    314   // TODO(dmikurube): Consider gettimeofday if available.
    315   time_t time_value = time(NULL);
    316 
    317   ++dump_count_;
    318 
    319   // Re-open files in /proc/<pid>/ if the process is a newly forked one.
    320   if (most_recent_pid_ != getpid()) {
    321     char hostname[64];
    322     if (0 == gethostname(hostname, sizeof(hostname))) {
    323       char* dot = strchr(hostname, '.');
    324       if (dot != NULL)
    325         *dot = '\0';
    326     } else {
    327       strcpy(hostname, "unknown");
    328     }
    329 
    330     most_recent_pid_ = getpid();
    331 
    332     snprintf(run_id_, sizeof(run_id_), "%s-linux-%d-%lu",
    333              hostname, most_recent_pid_,
                     static_cast<unsigned long>(time(NULL)));
    334 
    335     memory_residence_info_getter_->Initialize();
    336     deep_table_.ResetIsLogged();
    337 
    338     // Write maps into "|filename_prefix_|.<pid>.maps".
    339     WriteProcMaps(filename_prefix_, raw_buffer, buffer_size);
    340   }
    341 
    342   // Reset committed sizes of buckets.
    343   deep_table_.ResetCommittedSize();
    344 
    345   // Record committed sizes.
    346   stats_.SnapshotAllocations(this);
    347 
    348   // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
    349   // glibc's snprintf normally allocates scratch memory with alloca, but it
    350   // falls back to malloc when a large buffer is required.
    351 
    352   buffer.AppendString(kProfileHeader, 0);
    353   buffer.AppendString(kProfileVersion, 0);
    354   buffer.AppendString("\n", 0);
    355 
    356   // Fill buffer with meta information.
    357   buffer.AppendString(kMetaInformationHeader, 0);
    358 
    359   buffer.AppendString("Time: ", 0);
    360   buffer.AppendUnsignedLong(time_value, 0);
    361   buffer.AppendChar('\n');
    362 
    363   if (reason != NULL) {
    364     buffer.AppendString("Reason: ", 0);
    365     buffer.AppendString(reason, 0);
    366     buffer.AppendChar('\n');
    367   }
    368 
    369   AppendCommandLine(&buffer);
    370 
    371   buffer.AppendString("RunID: ", 0);
    372   buffer.AppendString(run_id_, 0);
    373   buffer.AppendChar('\n');
    374 
    375   buffer.AppendString("PageSize: ", 0);
    376   buffer.AppendInt(getpagesize(), 0, 0);
    377   buffer.AppendChar('\n');
    378 
    379   // Assumes the physical memory <= 64GB (PFN < 2^24).
    380   if (pageframe_type_ == DUMP_PAGECOUNT &&
    381       memory_residence_info_getter_->IsPageCountAvailable()) {
    382     buffer.AppendString("PageFrame: 24,Base64,PageCount", 0);
    383     buffer.AppendChar('\n');
    384   } else if (pageframe_type_ != DUMP_NO_PAGEFRAME) {
    385     buffer.AppendString("PageFrame: 24,Base64", 0);
    386     buffer.AppendChar('\n');
    387   }
    388 
    389   // Fill buffer with the mmap list.
    390   buffer.AppendString(kMMapListHeader, 0);
    391 
    392   stats_.SnapshotMaps(memory_residence_info_getter_, this, &buffer);
    393 
    394   // Fill buffer with the global stats.
    395   buffer.AppendString(kGlobalStatsHeader, 0);
    396 
    397   stats_.Unparse(&buffer);
    398 
    399   buffer.AppendString(kStacktraceHeader, 0);
    400   buffer.AppendString(kVirtualLabel, 10);
    401   buffer.AppendChar(' ');
    402   buffer.AppendString(kCommittedLabel, 10);
    403   buffer.AppendString("\n", 0);
    404 
    405   // Fill buffer with the per-bucket stack trace stats.
    406   deep_table_.UnparseForStats(&buffer);
    407 
    408   buffer.Flush();
    409 
    410   // Write the bucket listing into a .bucket file.
    411   deep_table_.WriteForBucketFile(
    412       filename_prefix_, dump_count_, raw_buffer, buffer_size);
    413 
    414 #ifndef NDEBUG
    415   int64 elapsed_cycles = CycleClock::Now() - starting_cycles;
    416   double elapsed_seconds = elapsed_cycles / CyclesPerSecond();
    417   RAW_LOG(0, "Time spent on DeepProfiler: %.3f sec\n", elapsed_seconds);
    418 #endif
    419 }
    420 
    421 int DeepHeapProfile::TextBuffer::Size() {
    422   return size_;
    423 }
    424 
    425 int DeepHeapProfile::TextBuffer::FilledBytes() {
    426   return cursor_;
    427 }
    428 
    429 void DeepHeapProfile::TextBuffer::Clear() {
    430   cursor_ = 0;
    431 }
    432 
    433 void DeepHeapProfile::TextBuffer::Flush() {
    434   RawWrite(fd_, buffer_, cursor_);
    435   cursor_ = 0;
    436 }
    437 
    438 // TODO(dmikurube): These Append* functions should not use snprintf.
    439 bool DeepHeapProfile::TextBuffer::AppendChar(char value) {
    440   return ForwardCursor(snprintf(buffer_ + cursor_, size_ - cursor_,
    441                                 "%c", value));
    442 }
    443 
    444 bool DeepHeapProfile::TextBuffer::AppendString(const char* value, int width) {
    445   char* position = buffer_ + cursor_;
    446   int available = size_ - cursor_;
    447   int appended;
    448   if (width == 0)
    449     appended = snprintf(position, available, "%s", value);
    450   else
    451     appended = snprintf(position, available, "%*s",
    452                         width, value);
    453   return ForwardCursor(appended);
    454 }
    455 
    456 bool DeepHeapProfile::TextBuffer::AppendInt(int value, int width,
    457                                             bool leading_zero) {
    458   char* position = buffer_ + cursor_;
    459   int available = size_ - cursor_;
    460   int appended;
    461   if (width == 0)
    462     appended = snprintf(position, available, "%d", value);
    463   else if (leading_zero)
    464     appended = snprintf(position, available, "%0*d", width, value);
    465   else
    466     appended = snprintf(position, available, "%*d", width, value);
    467   return ForwardCursor(appended);
    468 }
    469 
    470 bool DeepHeapProfile::TextBuffer::AppendLong(long value, int width) {
    471   char* position = buffer_ + cursor_;
    472   int available = size_ - cursor_;
    473   int appended;
    474   if (width == 0)
    475     appended = snprintf(position, available, "%ld", value);
    476   else
    477     appended = snprintf(position, available, "%*ld", width, value);
    478   return ForwardCursor(appended);
    479 }
    480 
    481 bool DeepHeapProfile::TextBuffer::AppendUnsignedLong(unsigned long value,
    482                                                      int width) {
    483   char* position = buffer_ + cursor_;
    484   int available = size_ - cursor_;
    485   int appended;
    486   if (width == 0)
    487     appended = snprintf(position, available, "%lu", value);
    488   else
    489     appended = snprintf(position, available, "%*lu", width, value);
    490   return ForwardCursor(appended);
    491 }
    492 
    493 bool DeepHeapProfile::TextBuffer::AppendInt64(int64 value, int width) {
    494   char* position = buffer_ + cursor_;
    495   int available = size_ - cursor_;
    496   int appended;
    497   if (width == 0)
    498     appended = snprintf(position, available, "%" PRId64, value);
    499   else
    500     appended = snprintf(position, available, "%*" PRId64, width, value);
    501   return ForwardCursor(appended);
    502 }
    503 
    504 bool DeepHeapProfile::TextBuffer::AppendPtr(uint64 value, int width) {
    505   char* position = buffer_ + cursor_;
    506   int available = size_ - cursor_;
    507   int appended;
    508   if (width == 0)
    509     appended = snprintf(position, available, "%" PRIx64, value);
    510   else
    511     appended = snprintf(position, available, "%0*" PRIx64, width, value);
    512   return ForwardCursor(appended);
    513 }
    514 
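        // Encodes the low 6 * |width| bits of |value| as |width| base-64 digits,
        // most significant digit first.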
    515 bool DeepHeapProfile::TextBuffer::AppendBase64(uint64 value, int width) {
    516   static const char base64[65] =
    517       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    518 #if defined(__BIG_ENDIAN__)
    519   value = bswap_64(value);
    520 #endif
    521   for (int shift = (width - 1) * 6; shift >= 0; shift -= 6) {
    522     if (!AppendChar(base64[(value >> shift) & 0x3f]))
    523       return false;
    524   }
    525   return true;
    526 }
    527 
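        // Advances the cursor by |appended| bytes if the preceding snprintf fit
        // into the remaining space, and flushes once the buffer is more than 4/5
        // full.  Returns false when the output was truncated.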
    528 bool DeepHeapProfile::TextBuffer::ForwardCursor(int appended) {
    529   if (appended < 0 || appended >= size_ - cursor_)
    530     return false;
    531   cursor_ += appended;
    532   if (cursor_ > size_ * 4 / 5)
    533     Flush();
    534   return true;
    535 }
    536 
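        // Writes one line of the STACKTRACES section, e.g. (roughly):
        //      1048576     524288   1024    512 @ 42
        // that is, virtual size, committed size, allocs, frees and the bucket id.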
    537 void DeepHeapProfile::DeepBucket::UnparseForStats(TextBuffer* buffer) {
    538   buffer->AppendInt64(bucket->alloc_size - bucket->free_size, 10);
    539   buffer->AppendChar(' ');
    540   buffer->AppendInt64(committed_size, 10);
    541   buffer->AppendChar(' ');
    542   buffer->AppendInt(bucket->allocs, 6, false);
    543   buffer->AppendChar(' ');
    544   buffer->AppendInt(bucket->frees, 6, false);
    545   buffer->AppendString(" @ ", 0);
    546   buffer->AppendInt(id, 0, false);
    547   buffer->AppendString("\n", 0);
    548 }
    549 
    550 void DeepHeapProfile::DeepBucket::UnparseForBucketFile(TextBuffer* buffer) {
    551   buffer->AppendInt(id, 0, false);
    552   buffer->AppendChar(' ');
    553   buffer->AppendString(is_mmap ? "mmap" : "malloc", 0);
    554 
    555 #if defined(TYPE_PROFILING)
    556   buffer->AppendString(" t0x", 0);
    557   buffer->AppendPtr(reinterpret_cast<uintptr_t>(type), 0);
    558   if (type == NULL) {
    559     buffer->AppendString(" nno_typeinfo", 0);
    560   } else {
    561     buffer->AppendString(" n", 0);
    562     buffer->AppendString(type->name(), 0);
    563   }
    564 #endif
    565 
    566   for (int depth = 0; depth < bucket->depth; depth++) {
    567     buffer->AppendString(" 0x", 0);
    568     buffer->AppendPtr(reinterpret_cast<uintptr_t>(bucket->stack[depth]), 8);
    569   }
    570   buffer->AppendString("\n", 0);
    571 }
    572 
    573 DeepHeapProfile::DeepBucketTable::DeepBucketTable(
    574     int table_size,
    575     HeapProfileTable::Allocator alloc,
    576     HeapProfileTable::DeAllocator dealloc)
    577     : table_(NULL),
    578       table_size_(table_size),
    579       alloc_(alloc),
    580       dealloc_(dealloc),
    581       bucket_id_(0) {
    582   const int bytes = table_size * sizeof(DeepBucket*);
    583   table_ = reinterpret_cast<DeepBucket**>(alloc(bytes));
    584   memset(table_, 0, bytes);
    585 }
    586 
    587 DeepHeapProfile::DeepBucketTable::~DeepBucketTable() {
    588   ASSERT(table_ != NULL);
    589   for (int i = 0; i < table_size_; i++) {
    590     for (DeepBucket* x = table_[i]; x != 0; /**/) {
    591       DeepBucket* db = x;
    592       x = x->next;
    593       dealloc_(db);
    594     }
    595   }
    596   dealloc_(table_);
    597 }
    598 
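        // Returns the DeepBucket for |bucket| (and |type| when TYPE_PROFILING is
        // enabled), creating a new one and inserting it into the hash table if it
        // does not exist yet.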
    599 DeepHeapProfile::DeepBucket* DeepHeapProfile::DeepBucketTable::Lookup(
    600     Bucket* bucket,
    601 #if defined(TYPE_PROFILING)
    602     const std::type_info* type,
    603 #endif
    604     bool is_mmap) {
    605   // Make hash-value
    606   uintptr_t h = 0;
    607 
    608   AddToHashValue(reinterpret_cast<uintptr_t>(bucket), &h);
    609   if (is_mmap) {
    610     AddToHashValue(1, &h);
    611   } else {
    612     AddToHashValue(0, &h);
    613   }
    614 
    615 #if defined(TYPE_PROFILING)
    616   if (type == NULL) {
    617     AddToHashValue(0, &h);
    618   } else {
    619     AddToHashValue(reinterpret_cast<uintptr_t>(type->name()), &h);
    620   }
    621 #endif
    622 
    623   FinishHashValue(&h);
    624 
    625   // Look up the stack trace in the table.
    626   unsigned int buck = ((unsigned int) h) % table_size_;
    627   for (DeepBucket* db = table_[buck]; db != 0; db = db->next) {
    628     if (db->bucket == bucket) {
    629       return db;
    630     }
    631   }
    632 
    633   // Create a new bucket
    634   DeepBucket* db = reinterpret_cast<DeepBucket*>(alloc_(sizeof(DeepBucket)));
    635   memset(db, 0, sizeof(*db));
    636   db->bucket         = bucket;
    637 #if defined(TYPE_PROFILING)
    638   db->type           = type;
    639 #endif
    640   db->committed_size = 0;
    641   db->is_mmap        = is_mmap;
    642   db->id             = (bucket_id_++);
    643   db->is_logged      = false;
    644   db->next           = table_[buck];
    645   table_[buck] = db;
    646   return db;
    647 }
    648 
    649 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
    650 void DeepHeapProfile::DeepBucketTable::UnparseForStats(TextBuffer* buffer) {
    651   for (int i = 0; i < table_size_; i++) {
    652     for (DeepBucket* deep_bucket = table_[i];
    653          deep_bucket != NULL;
    654          deep_bucket = deep_bucket->next) {
    655       Bucket* bucket = deep_bucket->bucket;
    656       if (bucket->alloc_size - bucket->free_size == 0) {
    657         continue;  // Skip empty buckets.
    658       }
    659       deep_bucket->UnparseForStats(buffer);
    660     }
    661   }
    662 }
    663 
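        // Appends every bucket that has not been logged yet to
        // "<prefix>.<pid>.<dump_count>.buckets", skipping malloc buckets whose
        // live size is 64 bytes or less.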
    664 void DeepHeapProfile::DeepBucketTable::WriteForBucketFile(
    665     const char* prefix, int dump_count, char raw_buffer[], int buffer_size) {
    666   char filename[100];
    667   snprintf(filename, sizeof(filename),
    668            "%s.%05d.%04d.buckets", prefix, getpid(), dump_count);
    669   RawFD fd = RawOpenForWriting(filename);
    670   RAW_DCHECK(fd != kIllegalRawFD, "");
    671 
    672   TextBuffer buffer(raw_buffer, buffer_size, fd);
    673 
    674   for (int i = 0; i < table_size_; i++) {
    675     for (DeepBucket* deep_bucket = table_[i];
    676          deep_bucket != NULL;
    677          deep_bucket = deep_bucket->next) {
    678       Bucket* bucket = deep_bucket->bucket;
    679       if (deep_bucket->is_logged) {
    680         continue;  // Skip the bucket if it is already logged.
    681       }
    682       if (!deep_bucket->is_mmap &&
    683           bucket->alloc_size - bucket->free_size <= 64) {
    684         continue;  // Skip small malloc buckets.
    685       }
    686 
    687       deep_bucket->UnparseForBucketFile(&buffer);
    688       deep_bucket->is_logged = true;
    689     }
    690   }
    691 
    692   buffer.Flush();
    693   RawClose(fd);
    694 }
    695 
    696 void DeepHeapProfile::DeepBucketTable::ResetCommittedSize() {
    697   for (int i = 0; i < table_size_; i++) {
    698     for (DeepBucket* deep_bucket = table_[i];
    699          deep_bucket != NULL;
    700          deep_bucket = deep_bucket->next) {
    701       deep_bucket->committed_size = 0;
    702     }
    703   }
    704 }
    705 
    706 void DeepHeapProfile::DeepBucketTable::ResetIsLogged() {
    707   for (int i = 0; i < table_size_; i++) {
    708     for (DeepBucket* deep_bucket = table_[i];
    709          deep_bucket != NULL;
    710          deep_bucket = deep_bucket->next) {
    711       deep_bucket->is_logged = false;
    712     }
    713   }
    714 }
    715 
    716 // This hash function is from HeapProfileTable::GetBucket.
    717 // static
    718 void DeepHeapProfile::DeepBucketTable::AddToHashValue(
    719     uintptr_t add, uintptr_t* hash_value) {
    720   *hash_value += add;
    721   *hash_value += *hash_value << 10;
    722   *hash_value ^= *hash_value >> 6;
    723 }
    724 
    725 // This hash function is from HeapProfileTable::GetBucket.
    726 // static
    727 void DeepHeapProfile::DeepBucketTable::FinishHashValue(uintptr_t* hash_value) {
    728   *hash_value += *hash_value << 3;
    729   *hash_value ^= *hash_value >> 11;
    730 }
    731 
    732 void DeepHeapProfile::RegionStats::Initialize() {
    733   virtual_bytes_ = 0;
    734   committed_bytes_ = 0;
    735 }
    736 
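        // Accounts the region [first_address, last_address]: its whole span is
        // added to virtual_bytes_ and its committed part to committed_bytes_.
        // Returns the committed size.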
    737 uint64 DeepHeapProfile::RegionStats::Record(
    738     const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
    739     uint64 first_address,
    740     uint64 last_address,
    741     TextBuffer* buffer) {
    742   uint64 committed;
    743   virtual_bytes_ += static_cast<size_t>(last_address - first_address + 1);
    744   committed = memory_residence_info_getter->CommittedSize(first_address,
    745                                                           last_address,
    746                                                           buffer);
    747   committed_bytes_ += committed;
    748   return committed;
    749 }
    750 
    751 void DeepHeapProfile::RegionStats::Unparse(const char* name,
    752                                            TextBuffer* buffer) {
    753   buffer->AppendString(name, 25);
    754   buffer->AppendChar(' ');
    755   buffer->AppendLong(virtual_bytes_, 12);
    756   buffer->AppendChar(' ');
    757   buffer->AppendLong(committed_bytes_, 12);
    758   buffer->AppendString("\n", 0);
    759 }
    760 
    761 // Snapshots all virtual memory mapping stats by merging mmap(2) records from
    762 // MemoryRegionMap and /proc/maps, the OS-level memory mapping information.
    763 // Memory regions described in /proc/maps that are not created by mmap are
    764 // accounted as "unhooked" memory regions.
    765 //
    766 // This function assumes that every memory region created by mmap is covered
    767 // by VMA(s) described in /proc/maps except for http://crbug.com/189114.
    768 // Note that memory regions created with mmap don't align with the borders of
    769 // VMAs in /proc/maps.  In other words, a single mmap'ed memory region can cut
    770 // across many VMAs, and a single VMA can include many mmap'ed memory regions.
    771 // This means that situations like the following can happen:
    772 //
    773 // => Virtual address
    774 // <----- VMA #1 -----><----- VMA #2 ----->...<----- VMA #3 -----><- VMA #4 ->
    775 // ..< mmap #1 >.<- mmap #2 -><- mmap #3 ->...<- mmap #4 ->..<-- mmap #5 -->..
    776 //
    777 // It can happen easily as permission can be changed by mprotect(2) for a part
    778 // of a memory region.  A change in permission splits VMA(s).
    779 //
    780 // To deal with the situation, this function iterates over MemoryRegionMap and
    781 // /proc/maps independently.  The iterator for MemoryRegionMap is initialized
    782 // at the top outside the loop for /proc/maps, and it goes forward inside the
    783 // loop while comparing their addresses.
    784 //
    785 // TODO(dmikurube): Eliminate dynamic memory allocation caused by snprintf.
    786 void DeepHeapProfile::GlobalStats::SnapshotMaps(
    787     const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
    788     DeepHeapProfile* deep_profile,
    789     TextBuffer* mmap_dump_buffer) {
    790   MemoryRegionMap::LockHolder lock_holder;
    791   ProcMapsIterator::Buffer procmaps_iter_buffer;
    792   ProcMapsIterator procmaps_iter(0, &procmaps_iter_buffer);
    793   uint64 vma_start_addr, vma_last_addr, offset;
    794   int64 inode;
    795   char* flags;
    796   char* filename;
    797   enum MapsRegionType type;
    798   for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
    799     all_[i].Initialize();
    800     unhooked_[i].Initialize();
    801   }
    802   profiled_mmap_.Initialize();
    803 
    804   MemoryRegionMap::RegionIterator mmap_iter =
    805       MemoryRegionMap::BeginRegionLocked();
    806   DeepBucket* deep_bucket = NULL;
    807   if (mmap_iter != MemoryRegionMap::EndRegionLocked()) {
    808     deep_bucket = GetInformationOfMemoryRegion(
    809         mmap_iter, memory_residence_info_getter, deep_profile);
    810   }
    811 
    812   while (procmaps_iter.Next(&vma_start_addr, &vma_last_addr,
    813                             &flags, &offset, &inode, &filename)) {
    814     if (mmap_dump_buffer) {
    815       char buffer[1024];
    816       procmaps_iter.FormatLine(buffer, sizeof(buffer),
    817                                vma_start_addr, vma_last_addr,
    818                                flags, offset, inode, filename, 0);
    819       mmap_dump_buffer->AppendString(buffer, 0);
    820     }
    821 
    822     // 'vma_last_addr' should be the last inclusive address of the region.
    823     vma_last_addr -= 1;
    824     if (strcmp("[vsyscall]", filename) == 0) {
    825       continue;  // Reading pagemap will fail in [vsyscall].
    826     }
    827 
    828     // TODO(dmikurube): |type| will be deprecated in the dump.
    829     // See http://crbug.com/245603.
    830     type = ABSENT;
    831     if (filename[0] == '/') {
    832       if (flags[2] == 'x')
    833         type = FILE_EXEC;
    834       else
    835         type = FILE_NONEXEC;
    836     } else if (filename[0] == '\0' || filename[0] == '\n') {
    837       type = ANONYMOUS;
    838     } else if (strcmp(filename, "[stack]") == 0) {
    839       type = STACK;
    840     } else {
    841       type = OTHER;
    842     }
    843     // TODO(dmikurube): This |all_| count should be removed soon.
    844     // See http://crbug.com/245603.
    845     uint64 vma_total = all_[type].Record(
    846         memory_residence_info_getter, vma_start_addr, vma_last_addr, NULL);
    847     uint64 vma_subtotal = 0;
    848 
    849     // TODO(dmikurube): Stop double-counting pagemap.
    850     // It will be fixed when http://crbug.com/245603 finishes.
    851     if (MemoryRegionMap::IsRecordingLocked()) {
    852       uint64 cursor = vma_start_addr;
    853       bool first = true;
    854 
    855       // Iterates over MemoryRegionMap until the iterator moves out of the VMA.
    856       do {
    857         if (!first) {
    858           cursor = mmap_iter->end_addr;
    859           ++mmap_iter;
    860           // Don't break here even if mmap_iter == EndRegionLocked().
    861 
    862           if (mmap_iter != MemoryRegionMap::EndRegionLocked()) {
    863             deep_bucket = GetInformationOfMemoryRegion(
    864                 mmap_iter, memory_residence_info_getter, deep_profile);
    865           }
    866         }
    867         first = false;
    868 
    869         uint64 last_address_of_unhooked;
    870         // If the next mmap entry is away from the current VMA.
    871         if (mmap_iter == MemoryRegionMap::EndRegionLocked() ||
    872             mmap_iter->start_addr > vma_last_addr) {
    873           last_address_of_unhooked = vma_last_addr;
    874         } else {
    875           last_address_of_unhooked = mmap_iter->start_addr - 1;
    876         }
    877 
    878         if (last_address_of_unhooked + 1 > cursor) {
    879           RAW_CHECK(cursor >= vma_start_addr,
    880                     "Wrong calculation for unhooked");
    881           RAW_CHECK(last_address_of_unhooked <= vma_last_addr,
    882                     "Wrong calculation for unhooked");
    883           uint64 committed_size = unhooked_[type].Record(
    884               memory_residence_info_getter,
    885               cursor,
    886               last_address_of_unhooked,
    887               mmap_dump_buffer);
    888           vma_subtotal += committed_size;
    889           if (mmap_dump_buffer) {
    890             mmap_dump_buffer->AppendString("  ", 0);
    891             mmap_dump_buffer->AppendPtr(cursor, 0);
    892             mmap_dump_buffer->AppendString(" - ", 0);
    893             mmap_dump_buffer->AppendPtr(last_address_of_unhooked + 1, 0);
    894             mmap_dump_buffer->AppendString("  unhooked ", 0);
    895             mmap_dump_buffer->AppendInt64(committed_size, 0);
    896             mmap_dump_buffer->AppendString(" / ", 0);
    897             mmap_dump_buffer->AppendInt64(
    898                 last_address_of_unhooked - cursor + 1, 0);
    899             mmap_dump_buffer->AppendString("\n", 0);
    900           }
    901           cursor = last_address_of_unhooked + 1;
    902         }
    903 
    904         if (mmap_iter != MemoryRegionMap::EndRegionLocked() &&
    905             mmap_iter->start_addr <= vma_last_addr &&
    906             mmap_dump_buffer) {
    907           bool trailing = mmap_iter->start_addr < vma_start_addr;
    908           bool continued = mmap_iter->end_addr - 1 > vma_last_addr;
    909           uint64 partial_first_address, partial_last_address;
    910           if (trailing)
    911             partial_first_address = vma_start_addr;
    912           else
    913             partial_first_address = mmap_iter->start_addr;
    914           if (continued)
    915             partial_last_address = vma_last_addr;
    916           else
    917             partial_last_address = mmap_iter->end_addr - 1;
    918           uint64 committed_size = memory_residence_info_getter->CommittedSize(
    919               partial_first_address, partial_last_address, mmap_dump_buffer);
    920           vma_subtotal += committed_size;
    921           mmap_dump_buffer->AppendString(trailing ? " (" : "  ", 0);
    922           mmap_dump_buffer->AppendPtr(mmap_iter->start_addr, 0);
    923           mmap_dump_buffer->AppendString(trailing ? ")" : " ", 0);
    924           mmap_dump_buffer->AppendString("-", 0);
    925           mmap_dump_buffer->AppendString(continued ? "(" : " ", 0);
    926           mmap_dump_buffer->AppendPtr(mmap_iter->end_addr, 0);
    927           mmap_dump_buffer->AppendString(continued ? ")" : " ", 0);
    928           mmap_dump_buffer->AppendString(" hooked ", 0);
    929           mmap_dump_buffer->AppendInt64(committed_size, 0);
    930           mmap_dump_buffer->AppendString(" / ", 0);
    931           mmap_dump_buffer->AppendInt64(
    932               partial_last_address - partial_first_address + 1, 0);
    933           mmap_dump_buffer->AppendString(" @ ", 0);
    934           if (deep_bucket != NULL) {
    935             mmap_dump_buffer->AppendInt(deep_bucket->id, 0, false);
    936           } else {
    937             mmap_dump_buffer->AppendInt(0, 0, false);
    938           }
    939           mmap_dump_buffer->AppendString("\n", 0);
    940         }
    941       } while (mmap_iter != MemoryRegionMap::EndRegionLocked() &&
    942                mmap_iter->end_addr - 1 <= vma_last_addr);
    943     }
    944 
    945     if (vma_total != vma_subtotal) {
    946       char buffer[1024];
    947       procmaps_iter.FormatLine(buffer, sizeof(buffer),
    948                                vma_start_addr, vma_last_addr,
    949                                flags, offset, inode, filename, 0);
    950       RAW_LOG(0, "[%d] Mismatched total in VMA %" PRId64 ":"
    951               "%" PRId64 " (%" PRId64 ")",
    952               getpid(), vma_total, vma_subtotal, vma_total - vma_subtotal);
    953       RAW_LOG(0, "[%d]   in %s", getpid(), buffer);
    954     }
    955   }
    956 
    957   // TODO(dmikurube): Investigate and fix http://crbug.com/189114.
    958   //
    959   // The total committed memory usage in all_ (from /proc/<pid>/maps) is
    960   // sometimes smaller than the sum of the committed mmap'ed addresses and
    961   // unhooked regions.  Within our observation, the difference was only 4KB
    962   // in committed usage, and zero in reserved virtual addresses.
    963   //
    964   // A guess is that an uncommitted (but reserved) page may become committed
    965   // during counting memory usage in the loop above.
    966   //
    967   // The difference is accounted as "ABSENT" to investigate such cases.
    968   //
    969   // It will be fixed when http://crbug.com/245603 finishes (no double count).
    970 
    971   RegionStats all_total;
    972   RegionStats unhooked_total;
    973   for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
    974     all_total.AddAnotherRegionStat(all_[i]);
    975     unhooked_total.AddAnotherRegionStat(unhooked_[i]);
    976   }
    977 
          // Note: size_t is unsigned, so compare before subtracting to avoid an
          // underflow when /proc/<pid>/maps reports more memory than the sum of
          // the profiled and unhooked regions.
    978   size_t profiled_virtual = profiled_mmap_.virtual_bytes() +
    979                             unhooked_total.virtual_bytes();
    980   if (profiled_virtual > all_total.virtual_bytes())
    981     all_[ABSENT].AddToVirtualBytes(
    982         profiled_virtual - all_total.virtual_bytes());
    983 
    984   size_t profiled_committed = profiled_mmap_.committed_bytes() +
    985                               unhooked_total.committed_bytes();
    986   if (profiled_committed > all_total.committed_bytes())
    987     all_[ABSENT].AddToCommittedBytes(
    988         profiled_committed - all_total.committed_bytes());
    989 }
    990 
    991 void DeepHeapProfile::GlobalStats::SnapshotAllocations(
    992     DeepHeapProfile* deep_profile) {
    993   profiled_malloc_.Initialize();
    994 
    995   deep_profile->heap_profile_->address_map_->Iterate(RecordAlloc, deep_profile);
    996 }
    997 
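        // Writes the GLOBAL_STATS section: a "# total ..." consistency-check line
        // followed by one "<name> <virtual> <committed>" row per region class.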
    998 void DeepHeapProfile::GlobalStats::Unparse(TextBuffer* buffer) {
    999   RegionStats all_total;
   1000   RegionStats unhooked_total;
   1001   for (int i = 0; i < NUMBER_OF_MAPS_REGION_TYPES; ++i) {
   1002     all_total.AddAnotherRegionStat(all_[i]);
   1003     unhooked_total.AddAnotherRegionStat(unhooked_[i]);
   1004   }
   1005 
   1006   // "# total (%lu) %c= profiled-mmap (%lu) + nonprofiled-* (%lu)\n"
   1007   buffer->AppendString("# total (", 0);
   1008   buffer->AppendUnsignedLong(all_total.committed_bytes(), 0);
   1009   buffer->AppendString(") ", 0);
   1010   buffer->AppendChar(all_total.committed_bytes() ==
   1011                      profiled_mmap_.committed_bytes() +
   1012                      unhooked_total.committed_bytes() ? '=' : '!');
   1013   buffer->AppendString("= profiled-mmap (", 0);
   1014   buffer->AppendUnsignedLong(profiled_mmap_.committed_bytes(), 0);
   1015   buffer->AppendString(") + nonprofiled-* (", 0);
   1016   buffer->AppendUnsignedLong(unhooked_total.committed_bytes(), 0);
   1017   buffer->AppendString(")\n", 0);
   1018 
   1019   // "                               virtual    committed"
   1020   buffer->AppendString("", 26);
   1021   buffer->AppendString(kVirtualLabel, 12);
   1022   buffer->AppendChar(' ');
   1023   buffer->AppendString(kCommittedLabel, 12);
   1024   buffer->AppendString("\n", 0);
   1025 
   1026   all_total.Unparse("total", buffer);
   1027   all_[ABSENT].Unparse("absent", buffer);
   1028   all_[FILE_EXEC].Unparse("file-exec", buffer);
   1029   all_[FILE_NONEXEC].Unparse("file-nonexec", buffer);
   1030   all_[ANONYMOUS].Unparse("anonymous", buffer);
   1031   all_[STACK].Unparse("stack", buffer);
   1032   all_[OTHER].Unparse("other", buffer);
   1033   unhooked_total.Unparse("nonprofiled-total", buffer);
   1034   unhooked_[ABSENT].Unparse("nonprofiled-absent", buffer);
   1035   unhooked_[ANONYMOUS].Unparse("nonprofiled-anonymous", buffer);
   1036   unhooked_[FILE_EXEC].Unparse("nonprofiled-file-exec", buffer);
   1037   unhooked_[FILE_NONEXEC].Unparse("nonprofiled-file-nonexec", buffer);
   1038   unhooked_[STACK].Unparse("nonprofiled-stack", buffer);
   1039   unhooked_[OTHER].Unparse("nonprofiled-other", buffer);
   1040   profiled_mmap_.Unparse("profiled-mmap", buffer);
   1041   profiled_malloc_.Unparse("profiled-malloc", buffer);
   1042 }
   1043 
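        // Callback for heap_profile_->address_map_->Iterate().  Accounts one live
        // allocation into its DeepBucket and into the profiled-malloc totals.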
   1044 // static
   1045 void DeepHeapProfile::GlobalStats::RecordAlloc(const void* pointer,
   1046                                                AllocValue* alloc_value,
   1047                                                DeepHeapProfile* deep_profile) {
   1048   uint64 address = reinterpret_cast<uintptr_t>(pointer);
   1049   size_t committed = deep_profile->memory_residence_info_getter_->CommittedSize(
   1050       address, address + alloc_value->bytes - 1, NULL);
   1051 
   1052   DeepBucket* deep_bucket = deep_profile->deep_table_.Lookup(
   1053       alloc_value->bucket(),
   1054 #if defined(TYPE_PROFILING)
   1055       LookupType(pointer),
   1056 #endif
   1057       /* is_mmap */ false);
   1058   deep_bucket->committed_size += committed;
   1059   deep_profile->stats_.profiled_malloc_.AddToVirtualBytes(alloc_value->bytes);
   1060   deep_profile->stats_.profiled_malloc_.AddToCommittedBytes(committed);
   1061 }
   1062 
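        // Accounts one MemoryRegionMap region into the profiled-mmap totals and
        // into the DeepBucket of its allocation stack.  Returns that bucket, or
        // NULL when no bucket is recorded for the region.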
   1063 DeepHeapProfile::DeepBucket*
   1064     DeepHeapProfile::GlobalStats::GetInformationOfMemoryRegion(
   1065         const MemoryRegionMap::RegionIterator& mmap_iter,
   1066         const MemoryResidenceInfoGetterInterface* memory_residence_info_getter,
   1067         DeepHeapProfile* deep_profile) {
   1068   size_t committed = deep_profile->memory_residence_info_getter_->
   1069       CommittedSize(mmap_iter->start_addr, mmap_iter->end_addr - 1, NULL);
   1070 
   1071   // TODO(dmikurube): Store a reference to the bucket in region.
   1072   Bucket* bucket = MemoryRegionMap::GetBucket(
   1073       mmap_iter->call_stack_depth, mmap_iter->call_stack);
   1074   DeepBucket* deep_bucket = NULL;
   1075   if (bucket != NULL) {
   1076     deep_bucket = deep_profile->deep_table_.Lookup(
   1077         bucket,
   1078 #if defined(TYPE_PROFILING)
   1079         NULL,  // No type information for memory regions by mmap.
   1080 #endif
   1081         /* is_mmap */ true);
   1082     if (deep_bucket != NULL)
   1083       deep_bucket->committed_size += committed;
   1084   }
   1085 
   1086   profiled_mmap_.AddToVirtualBytes(
   1087       mmap_iter->end_addr - mmap_iter->start_addr);
   1088   profiled_mmap_.AddToCommittedBytes(committed);
   1089 
   1090   return deep_bucket;
   1091 }
   1092 
   1093 // static
   1094 void DeepHeapProfile::WriteProcMaps(const char* prefix,
   1095                                     char raw_buffer[],
   1096                                     int buffer_size) {
   1097   char filename[100];
   1098   snprintf(filename, sizeof(filename),
   1099            "%s.%05d.maps", prefix, static_cast<int>(getpid()));
   1100 
   1101   RawFD fd = RawOpenForWriting(filename);
   1102   RAW_DCHECK(fd != kIllegalRawFD, "");
   1103 
   1104   int length;
   1105   bool wrote_all;
   1106   length = tcmalloc::FillProcSelfMaps(raw_buffer, buffer_size, &wrote_all);
   1107   RAW_DCHECK(wrote_all, "");
   1108   RAW_DCHECK(length <= buffer_size, "");
   1109   RawWrite(fd, raw_buffer, length);
   1110   RawClose(fd);
   1111 }
   1112 #else  // USE_DEEP_HEAP_PROFILE
   1113 
   1114 DeepHeapProfile::DeepHeapProfile(HeapProfileTable* heap_profile,
   1115                                  const char* prefix)
   1116     : heap_profile_(heap_profile) {
   1117 }
   1118 
   1119 DeepHeapProfile::~DeepHeapProfile() {
   1120 }
   1121 
   1122 int DeepHeapProfile::DumpOrderedProfile(const char* reason,
   1123                                         char raw_buffer[],
   1124                                         int buffer_size,
   1125                                         RawFD fd) {
          return 0;  // Dummy value; deep heap profiling is disabled.
   1126 }
   1127 
   1128 #endif  // USE_DEEP_HEAP_PROFILE
   1129