// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//
// TODO: Log large allocations

#include <config.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>    // for open()
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <errno.h>
#include <assert.h>
#include <sys/types.h>

#include <algorithm>
#include <string>

#include <gperftools/heap-profiler.h>

#include "base/logging.h"
#include "base/basictypes.h"   // for PRId64, among other things
#include "base/googleinit.h"
#include "base/commandlineflags.h"
#include "malloc_hook-inl.h"
#include "tcmalloc_guard.h"
#include <gperftools/malloc_hook.h>
#include <gperftools/malloc_extension.h>
#include "base/spinlock.h"
#include "base/low_level_alloc.h"
#include "base/sysinfo.h"      // for GetUniquePathFromEnv()
#include "deep-heap-profile.h"
#include "heap-profile-table.h"
#include "memory_region_map.h"


#ifndef PATH_MAX
#ifdef MAXPATHLEN
#define PATH_MAX MAXPATHLEN
#else
#define PATH_MAX 4096         // seems conservative for max filename len!
#endif
#endif

#if defined(__ANDROID__) || defined(ANDROID)
// On Android, there are no environment variables.
// Instead, we use system properties, set via:
//   adb shell setprop prop_name prop_value
// From <sys/system_properties.h>,
//   PROP_NAME_MAX   32
//   PROP_VALUE_MAX  92
#define HEAPPROFILE "heapprof"
#define HEAP_PROFILE_ALLOCATION_INTERVAL "heapprof.allocation_interval"
#define HEAP_PROFILE_DEALLOCATION_INTERVAL "heapprof.deallocation_interval"
#define HEAP_PROFILE_INUSE_INTERVAL "heapprof.inuse_interval"
#define HEAP_PROFILE_TIME_INTERVAL "heapprof.time_interval"
#define HEAP_PROFILE_MMAP_LOG "heapprof.mmap_log"
#define HEAP_PROFILE_MMAP "heapprof.mmap"
#define HEAP_PROFILE_ONLY_MMAP "heapprof.only_mmap"
#define DEEP_HEAP_PROFILE "heapprof.deep_heap_profile"
#define DEEP_HEAP_PROFILE_PAGEFRAME "heapprof.deep.pageframe"
#define HEAP_PROFILE_TYPE_STATISTICS "heapprof.type_statistics"
#else  // defined(__ANDROID__) || defined(ANDROID)
#define HEAPPROFILE "HEAPPROFILE"
#define HEAP_PROFILE_ALLOCATION_INTERVAL "HEAP_PROFILE_ALLOCATION_INTERVAL"
#define HEAP_PROFILE_DEALLOCATION_INTERVAL "HEAP_PROFILE_DEALLOCATION_INTERVAL"
#define HEAP_PROFILE_INUSE_INTERVAL "HEAP_PROFILE_INUSE_INTERVAL"
#define HEAP_PROFILE_TIME_INTERVAL "HEAP_PROFILE_TIME_INTERVAL"
#define HEAP_PROFILE_MMAP_LOG "HEAP_PROFILE_MMAP_LOG"
#define HEAP_PROFILE_MMAP "HEAP_PROFILE_MMAP"
#define HEAP_PROFILE_ONLY_MMAP "HEAP_PROFILE_ONLY_MMAP"
#define DEEP_HEAP_PROFILE "DEEP_HEAP_PROFILE"
#define DEEP_HEAP_PROFILE_PAGEFRAME "DEEP_HEAP_PROFILE_PAGEFRAME"
#define HEAP_PROFILE_TYPE_STATISTICS "HEAP_PROFILE_TYPE_STATISTICS"
#endif  // defined(__ANDROID__) || defined(ANDROID)
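
// For illustration (paths here are hypothetical), profiling is typically
// enabled by setting the variable/property before the program starts:
//
//   $ HEAPPROFILE=/tmp/myprog.hprof ./myprog                     # Linux
//   $ adb shell setprop heapprof /data/local/tmp/myprog.hprof    # Android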

using STL_NAMESPACE::string;
using STL_NAMESPACE::sort;

//----------------------------------------------------------------------
// Flags that control heap-profiling
//
// The thread-safety of the profiler depends on these being immutable
// after main starts, so don't change them.
//----------------------------------------------------------------------

DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64(HEAP_PROFILE_ALLOCATION_INTERVAL, 1 << 30 /*1GB*/),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes allocated by the program since "
             "the last dump.");
DEFINE_int64(heap_profile_deallocation_interval,
             EnvToInt64(HEAP_PROFILE_DEALLOCATION_INTERVAL, 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes deallocated by the program "
             "since the last dump.");
// We could also add flags that report whenever inuse_bytes changes by
// X or -X, but there hasn't been a need for that yet, so we haven't.
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64(HEAP_PROFILE_INUSE_INTERVAL, 100 << 20 /*100MB*/),
             "If non-zero, dump heap profiling information whenever "
             "the high-water memory usage mark increases by the specified "
             "number of bytes.");
DEFINE_int64(heap_profile_time_interval,
             EnvToInt64(HEAP_PROFILE_TIME_INTERVAL, 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of seconds since the last dump.");
DEFINE_bool(mmap_log,
            EnvToBool(HEAP_PROFILE_MMAP_LOG, false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool(HEAP_PROFILE_MMAP, false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk.");
DEFINE_bool(only_mmap_profile,
            EnvToBool(HEAP_PROFILE_ONLY_MMAP, false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc.");
DEFINE_bool(deep_heap_profile,
            EnvToBool(DEEP_HEAP_PROFILE, false),
            "If heap-profiling is on, profile deeper (Linux and Android).");
DEFINE_int32(deep_heap_profile_pageframe,
             EnvToInt(DEEP_HEAP_PROFILE_PAGEFRAME, 0),
             "Requires deep_heap_profile. If 1, dump page frame numbers "
             "(PFNs). If 2, dump page counts (/proc/kpagecount) with PFNs.");
#if defined(TYPE_PROFILING)
DEFINE_bool(heap_profile_type_statistics,
            EnvToBool(HEAP_PROFILE_TYPE_STATISTICS, false),
            "If heap-profiling is on, dump type statistics.");
#endif  // defined(TYPE_PROFILING)


//----------------------------------------------------------------------
// Locking
//----------------------------------------------------------------------

// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);

//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------

static LowLevelAlloc::Arena *heap_profiler_memory;

static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}

// We use buffers of this size in DoGetHeapProfile.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated in
// HeapProfilerStart.  Access to this must be protected by heap_lock.
static char* global_profiler_buffer = NULL;


//----------------------------------------------------------------------
// Profiling control/state data
//----------------------------------------------------------------------

// Access to all of these is protected by heap_lock.
static bool  is_on = false;           // True if we are on as a subsystem.
static bool  dumping = false;         // Dumping status, to prevent recursion.
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet).
static int   dump_count = 0;          // How many dumps so far.
static int64 last_dump_alloc = 0;     // alloc_size when we last dumped.
static int64 last_dump_free = 0;      // free_size when we last dumped.
static int64 high_water_mark = 0;     // In-use bytes at last high-water dump.
static int64 last_dump_time = 0;      // The time of the last dump.

static HeapProfileTable* heap_profile = NULL;  // the heap profile table
static DeepHeapProfile* deep_profile = NULL;   // deep memory profiler

// Callback to generate a stack trace for an allocation. May be overridden
// by an application to provide its own pseudo-stacks.
static StackGeneratorFunction stack_generator_function =
    HeapProfileTable::GetCallerStackTrace;

//----------------------------------------------------------------------
// Profile generation
//----------------------------------------------------------------------

// Input must be a buffer of size at least 1MB.
static char* DoGetHeapProfileLocked(char* buf, int buflen) {
  // We used to be smarter about estimating the required memory and
  // then capping it to 1MB and generating the profile into that.
  if (buf == NULL || buflen < 1)
    return NULL;

  RAW_DCHECK(heap_lock.IsHeld(), "");
  int bytes_written = 0;
  if (is_on) {
    HeapProfileTable::Stats const stats = heap_profile->total();
    (void)stats;   // avoid an unused-variable warning in non-debug mode.
    bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
    // FillOrderedProfile should not reduce the set of active mmap-ed regions,
    // hence MemoryRegionMap will let us remove everything we've added above:
    RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
    // If this check fails, FillOrderedProfile somehow removed more
    // entries than we had added.
  }
  buf[bytes_written] = '\0';
  RAW_DCHECK(bytes_written == strlen(buf), "");

  return buf;
}

extern "C" char* GetHeapProfile() {
  // Use normal malloc: we return the profile to the user to free it:
  char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
  SpinLockHolder l(&heap_lock);
  return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
}
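
// Illustrative caller-side usage (not part of this file): the returned
// profile is malloc()-ed, so the caller owns it and must free() it:
//
//   char* profile = GetHeapProfile();
//   if (profile != NULL) {
//     fputs(profile, stderr);  // or write it to a file
//     free(profile);
//   }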

// defined below
static void NewHook(const void* ptr, size_t size);
static void DeleteHook(const void* ptr);

// Helper for HeapProfilerDump.
static void DumpProfileLocked(const char* reason) {
  RAW_DCHECK(heap_lock.IsHeld(), "");
  RAW_DCHECK(is_on, "");
  RAW_DCHECK(!dumping, "");

  if (filename_prefix == NULL) return;  // we do not yet need dumping

  dumping = true;

  // Make file name
  char file_name[1000];
  dump_count++;
  snprintf(file_name, sizeof(file_name), "%s.%05d.%04d%s",
           filename_prefix, getpid(), dump_count, HeapProfileTable::kFileExt);
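  // For example (values are hypothetical): with prefix "/tmp/myprog.hprof",
  // pid 1234, and a ".heap" extension, the third dump would be written to
  // "/tmp/myprog.hprof.01234.0003.heap".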

  // Dump the profile
  RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
  // We must use file routines that don't allocate memory, since we
  // hold heap_lock now.
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
    dumping = false;
    return;
  }

  // This case may be impossible, but it's best to be safe.
  // It's safe to use the global buffer: we're protected by heap_lock.
  if (global_profiler_buffer == NULL) {
    global_profiler_buffer =
        reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
  }

  if (deep_profile) {
    deep_profile->DumpOrderedProfile(reason, global_profiler_buffer,
                                     kProfileBufferSize, fd);
  } else {
    char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
                                           kProfileBufferSize);
    RawWrite(fd, profile, strlen(profile));
  }
  RawClose(fd);

#if defined(TYPE_PROFILING)
  if (FLAGS_heap_profile_type_statistics) {
    snprintf(file_name, sizeof(file_name), "%s.%05d.%04d.type",
             filename_prefix, getpid(), dump_count);
    RAW_VLOG(0, "Dumping type statistics to %s", file_name);
    heap_profile->DumpTypeStatistics(file_name);
  }
#endif  // defined(TYPE_PROFILING)

  dumping = false;
}

//----------------------------------------------------------------------
// Profile collection
//----------------------------------------------------------------------

// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.
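//
// For example, with the default --heap_profile_inuse_interval of 100MB, a
// dump is triggered whenever in-use bytes first exceed the high-water mark
// recorded at the previous dump by more than 100MB.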
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];
    int64 current_time = time(NULL);
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_time_interval > 0 &&
               current_time - last_dump_time >=
               FLAGS_heap_profile_time_interval) {
      // Use PRId64 here: current_time - last_dump_time is an int64, so
      // "%d" would be wrong on LP64 platforms.
      snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
               current_time - last_dump_time);
      need_to_dump = true;
      last_dump_time = current_time;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);

      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}

// Record an allocation in the profile.
static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
  // Take the stack trace outside the critical section.
  void* stack[HeapProfileTable::kMaxStackDepth];
  int depth = stack_generator_function(skip_count + 1, stack);
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordAlloc(ptr, bytes, depth, stack);
    MaybeDumpProfileLocked();
  }
}

// Record a deallocation in the profile.
static void RecordFree(const void* ptr) {
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordFree(ptr);
    MaybeDumpProfileLocked();
  }
}

//----------------------------------------------------------------------
// Allocation/deallocation hooks for MallocHook
//----------------------------------------------------------------------

// static
void NewHook(const void* ptr, size_t size) {
  if (ptr != NULL) RecordAlloc(ptr, size, 0);
}

// static
void DeleteHook(const void* ptr) {
  if (ptr != NULL) RecordFree(ptr);
}

// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif

static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR and casts, not just '%p', to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mmap(start=0x%" PRIxPTR ", len=%" PRIuS ", prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%" PRIxPTR,
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR and casts, not just '%p', to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%" PRIxPTR ", old_size=%" PRIuS ", "
            "new_size=%" PRIuS ", flags=0x%x, new_addr=0x%" PRIxPTR ") = "
            "0x%" PRIxPTR,
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR and a cast, not just '%p', to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%" PRIuS ")",
                  (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%" PRIdS ") = 0x%" PRIxPTR,
                  increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

//----------------------------------------------------------------------
// Starting/stopping/dumping
//----------------------------------------------------------------------

extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;

  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  // This should be done before the hooks are set up, since it may
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();

  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record the call stacks of all mmap, mremap,
    // and sbrk calls, up to kMaxStackDepth frames deep:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth,
                          /* use_buckets */ true);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }

  heap_profiler_memory =
    LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));

  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
      HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);

  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;
  last_dump_time = 0;

  if (FLAGS_deep_heap_profile) {
    // Initialize deep memory profiler
    RAW_VLOG(0, "[%d] Starting a deep memory profiler", getpid());
    deep_profile = new(ProfilerMalloc(sizeof(DeepHeapProfile)))
        DeepHeapProfile(heap_profile, prefix, DeepHeapProfile::PageFrameType(
            FLAGS_deep_heap_profile_pageframe));
  }

  // We do not reset dump_count, so if the user does a sequence of
  // HeapProfilerStart/HeapProfilerStop calls, we will get a continuous
  // sequence of profiles.

  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }

  // Copy filename prefix only if provided.
  if (!prefix)
    return;
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}
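
// Illustrative programmatic usage (a sketch, not part of this file): the
// profiler can also be driven directly from application code through the
// public API declared in <gperftools/heap-profiler.h>:
//
//   HeapProfilerStart("/tmp/myprog.hprof");  // path prefix is hypothetical
//   RunWorkloadOfInterest();                 // hypothetical workload
//   HeapProfilerDump("after workload");
//   HeapProfilerStop();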

extern "C" void HeapProfilerWithPseudoStackStart(
    StackGeneratorFunction callback) {
  {
    // Ensure the callback is set before allocations can be recorded.
    SpinLockHolder l(&heap_lock);
    stack_generator_function = callback;
  }
  HeapProfilerStart(NULL);
}
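
// Illustrative sketch (all names below are hypothetical): an application
// can supply its own StackGeneratorFunction so recorded allocations are
// attributed to application-defined pseudo-frames instead of real return
// addresses:
//
//   static int MyPseudoStackGenerator(int skip_count, void** stack) {
//     // Fill in up to HeapProfileTable::kMaxStackDepth entries; here we
//     // report a single synthetic frame identifying the current task.
//     stack[0] = GetCurrentTaskMarker();  // hypothetical helper
//     return 1;                           // depth of the generated stack
//   }
//   ...
//   HeapProfilerWithPseudoStackStart(&MyPseudoStackGenerator);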

extern "C" void IterateAllocatedObjects(AddressVisitor visitor, void* data) {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  heap_profile->IterateAllocationAddresses(visitor, data);
}

extern "C" int IsHeapProfilerRunning() {
  SpinLockHolder l(&heap_lock);
  return is_on ? 1 : 0;   // return an int, because C code doesn't have bool
}

extern "C" void HeapProfilerStop() {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  if (FLAGS_only_mmap_profile == false) {
    // Unset our new/delete hooks, checking they were set:
    RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
  }
  if (FLAGS_mmap_log) {
    // Remove our mmap/mremap/munmap/sbrk hooks, checking that they were set:
    RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
    RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  }

  if (deep_profile) {
    // free deep memory profiler
    deep_profile->~DeepHeapProfile();
    ProfilerFree(deep_profile);
    deep_profile = NULL;
  }

  // free profile
  heap_profile->~HeapProfileTable();
  ProfilerFree(heap_profile);
  heap_profile = NULL;

  // free output-buffer memory
  ProfilerFree(global_profiler_buffer);

  // free prefix
  ProfilerFree(filename_prefix);
  filename_prefix = NULL;

  if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
    RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
  }

  if (FLAGS_mmap_profile) {
    MemoryRegionMap::Shutdown();
  }

  is_on = false;
}

extern "C" void HeapProfilerDump(const char* reason) {
  SpinLockHolder l(&heap_lock);
  if (is_on && !dumping) {
    DumpProfileLocked(reason);
  }
}

extern "C" void HeapProfilerMarkBaseline() {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  heap_profile->MarkCurrentAllocations(HeapProfileTable::MARK_ONE);
}

extern "C" void HeapProfilerMarkInteresting() {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  heap_profile->MarkUnmarkedAllocations(HeapProfileTable::MARK_TWO);
}

extern "C" void HeapProfilerDumpAliveObjects(const char* filename) {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  heap_profile->DumpMarkedObjects(HeapProfileTable::MARK_TWO, filename);
}
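
// Illustrative sketch of how the three calls above combine (file name and
// workload are hypothetical): allocations made after the baseline and still
// alive when marked "interesting" are the ones written out:
//
//   HeapProfilerMarkBaseline();     // existing allocations get MARK_ONE
//   RunPhaseOfInterest();           // hypothetical phase being studied
//   HeapProfilerMarkInteresting();  // its surviving allocations get MARK_TWO
//   HeapProfilerDumpAliveObjects("/tmp/alive-objects.dump");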

//----------------------------------------------------------------------
// Initialization/finalization code
//----------------------------------------------------------------------

// Initialization code
static void HeapProfilerInit() {
  // Everything after this point sets up the profiler based on the
  // HEAPPROFILE environment variable (or, on Android, system property).
  char fname[PATH_MAX];
  if (!GetUniquePathFromEnv(HEAPPROFILE, fname)) {
    return;
  }
  // We do a uid check so we don't write out files in a setuid executable.
#if !defined(__ANDROID__) && defined(HAVE_GETEUID)
  if (getuid() != geteuid()) {
    RAW_LOG(WARNING, ("HeapProfiler: ignoring " HEAPPROFILE " because "
                      "program seems to be setuid\n"));
    return;
  }
#endif

  HeapProfileTable::CleanupOldProfiles(fname);

  HeapProfilerStart(fname);
}

// class used for finalization -- dumps the heap-profile at program exit
struct HeapProfileEndWriter {
  ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
};

// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
static HeapProfileEndWriter heap_profile_end_writer;