// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"    // also for the RawFD I/O commands
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");

DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");
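
// Both flags read their defaults from the environment, so they can be set
// without touching the command line.  Illustrative shell usage (binary name
// and values are hypothetical):
//
//   HEAP_PROFILE_CLEANUP=false HEAP_CHECK_MAX_LEAKS=50 ./my_binary
//
// which keeps old heap profiles around and raises the leak-report limit.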

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
#if defined(TYPE_PROFILING)
static const char kTypeProfileStatsHeader[] = "type statistics:\n";
#endif  // defined(TYPE_PROFILING)
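
// Putting the pieces together, a dumped profile is a text file that looks
// roughly like this (a schematic example with made-up numbers, not real
// output):
//
//   heap profile:     42:    16384 [   100:    65536] @ heapprofile
//       10:     8192 [    30:    32768] @ 0x004004f1 0x00400d2e
//       ...
//   MAPPED_LIBRARIES:
//   00400000-00452000 r-xp 00000000 08:02 173521  /usr/bin/my_binary
//   ...
//
// Each bucket line is produced by UnparseBucket() below, and the trailing
// section is a copy of /proc/self/maps.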

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip a different number of stack frames in debug mode
// because less inlining happens in that case.
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b".
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      bucket_table_(NULL),
      profile_mmap_(profile_mmap),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize the overall stats.  (num_buckets_ is already zeroed in the
  // initializer list above.)
  memset(&total_, 0, sizeof(total_));
}
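
// Construction sketch (illustrative only; the real instantiation lives in
// heap-profiler.cc).  The allocator pair handed in here must bypass the
// profiled heap, or every internal bucket allocation would itself be
// profiled.  ProfilerMalloc/ProfilerFree stand in for such a pair:
//
//   static void* ProfilerMalloc(size_t bytes) { /* untracked allocation */ }
//   static void  ProfilerFree(void* p)        { /* matching deallocation */ }
//   ...
//   HeapProfileTable* table =
//       new (ProfilerMalloc(sizeof(HeapProfileTable)))
//           HeapProfileTable(ProfilerMalloc, ProfilerFree,
//                            /*profile_mmap=*/false);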

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Hash the stack trace with a Jenkins-style one-at-a-time mix over
  // the raw frame addresses.
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}
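
// How the two calls above fit together, as seen from a malloc hook (a
// sketch of the caller's side; the real driver lives in heap-profiler.cc):
//
//   void NewHook(const void* ptr, size_t size) {
//     if (ptr != NULL) {
//       void* stack[HeapProfileTable::kMaxStackDepth];
//       int depth = table->GetCallerStackTrace(0, stack);
//       table->RecordAlloc(ptr, size, depth, stack);
//     }
//   }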

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
    address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}
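
// For example (values hypothetical): if a 100-byte object was recorded at
// address p, then FindInsideAlloc(p + 40, max_size, &obj, &size) sets
// obj == p and size == 100, since p + 40 points into [p, p + 100).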

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

void HeapProfileTable::IterateAllocationAddresses(AddressIterator f,
                                                  void* data) {
  const AllocationAddressIteratorArgs args(f, data);
  address_map_->Iterate<const AllocationAddressIteratorArgs&>(
      AllocationAddressesIterator, args);
}

void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) {
  const MarkArgs args(mark, true);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) {
  // Pass mark_all == false so that MarkIterator only touches allocations
  // that are still UNMARKED; passing true here would re-mark everything,
  // making this identical to MarkCurrentAllocations.
  const MarkArgs args(mark, false);
  address_map_->Iterate<const MarkArgs&>(MarkIterator, args);
}

// We'd be happier using a snprintf-style formatter class here, but we
// avoid it to reduce dependencies.
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
    snprintf(buf + buflen, bufsize - buflen,
             "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
             b.allocs - b.frees,
             b.alloc_size - b.free_size,
             b.allocs,
             b.alloc_size,
             extra);
  // If snprintf failed or would have truncated, pretend nothing was printed.
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
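
// A single unparsed bucket line therefore looks like (made-up numbers):
//
//      3:     2048 [     5:     4096] @ 0x004004f1 0x00400d2e
//
// i.e. "live objects: live bytes [total allocs: total alloc bytes]"
// followed by the bucket's stack frames.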

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  // The list holds pointers to the buckets, so allocate sizeof(Bucket*)
  // (not sizeof(Bucket)) per entry.
  Bucket** list =
      static_cast<Bucket**>(alloc_(sizeof(Bucket*) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);

  return list;
}

void HeapProfileTable::DumpMarkedObjects(AllocationMark mark,
                                         const char* file_name) {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping marked objects to %s", file_name);
    return;
  }
  const DumpMarkedArgs args(fd, mark);
  address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args);
  RawClose(fd);
}

#if defined(TYPE_PROFILING)
void HeapProfileTable::DumpTypeStatistics(const char* file_name) const {
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name);
    return;
  }

  AddressMap<TypeCount>* type_size_map;
  type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>)))
      AddressMap<TypeCount>(alloc_, dealloc_);
  address_map_->Iterate(TallyTypesItererator, type_size_map);

  RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader));
  const DumpArgs args(fd, NULL);
  type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args);
  RawClose(fd);

  type_size_map->~AddressMap<TypeCount>();
  dealloc_(type_size_map);
}
#endif  // defined(TYPE_PROFILING)

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.  Whew!
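  //
  // Schematically, with M = maps text and B = bucket text (a sketch, not
  // part of the file format):
  //
  //   step 1: [MMMM................]   print maps at the front of buf
  //   step 2: [MMMM............MMMM]   copy the maps text to the very end
  //   step 3: [BBBBBBBB........MMMM]   print buckets into the prefix
  //   step 4: [BBBBBBBBMMMM........]   move maps down to close the gap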
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) return 0;
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) return 0;
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

#if defined(TYPE_PROFILING)
// static
void HeapProfileTable::TallyTypesItererator(
    const void* ptr,
    AllocValue* value,
    AddressMap<TypeCount>* type_size_map) {
  const std::type_info* type = LookupType(ptr);

  const void* key = NULL;
  if (type)
    key = type->name();

  TypeCount* count = type_size_map->FindMutable(key);
  if (count) {
    count->bytes += value->bytes;
    ++count->objects;
  } else {
    type_size_map->Insert(key, TypeCount(value->bytes, 1));
  }
}

// static
void HeapProfileTable::DumpTypesIterator(const void* ptr,
                                         TypeCount* count,
                                         const DumpArgs& args) {
  char buf[1024];
  int len;
  const char* mangled_type_name = static_cast<const char*>(ptr);
  len = snprintf(buf, sizeof(buf), "%6d: %8" PRId64 " @ %s\n",
                 count->objects, count->bytes,
                 mangled_type_name ? mangled_type_name : "(no_typeinfo)");
  RawWrite(args.fd, buf, len);
}
#endif  // defined(TYPE_PROFILING)

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::DumpMarkedIterator(const void* ptr, AllocValue* v,
                                          const DumpMarkedArgs& args) {
  if (v->mark() != args.mark)
    return;
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  // "0x" plus up to 16 hex digits plus NUL needs 19 bytes on 64-bit; also
  // cast the pointer explicitly, since "%" PRIxPTR expects a uintptr_t.
  char addr[32];
  snprintf(addr, sizeof(addr), "0x%08" PRIxPTR,
           reinterpret_cast<uintptr_t>(ptr));
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), addr, NULL);
  RawWrite(args.fd, buf, len);
}

inline
void HeapProfileTable::AllocationAddressesIterator(
    const void* ptr,
    AllocValue* v,
    const AllocationAddressIteratorArgs& args) {
  args.callback(args.data, ptr);
}

inline
void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v,
                                    const MarkArgs& args) {
  if (!args.mark_all && v->mark() != UNMARKED)
    return;
  v->set_mark(args.mark);
}

// Callback from NonLiveSnapshot; adds the entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
  RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
  char buf[512];
  int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                          NULL);
  RawWrite(fd, buf, len);
  const DumpArgs args(fd, NULL);
  allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
  RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
  DumpProcSelfMaps(fd);
  RawClose(fd);
  return true;
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  char buf[1000];
  snprintf(buf, sizeof(buf), "%s.%05d.", prefix, getpid());
  string pattern = string(buf) + ".*" + kFileExt;
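  // For example (hypothetical values), prefix "/tmp/prof" and pid 123
  // produce the glob pattern "/tmp/prof.00123..*.heap".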

#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const size_t prefix_length = strlen(prefix);
    for (size_t i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()]; // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}
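
// The resulting log output looks roughly like this (a sketch with made-up
// numbers, addresses, and symbol names):
//
//   Leak check _main_ detected leaks of 256 bytes in 2 objects
//   The 2 largest leaks:
//   Leak of 192 bytes in 1 objects allocated from:
//       @ 4004f1 MakeWidget
//       @ 400d2e main
//   ...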

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}
    752