/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Maxim Lifantsev
 */

//
// Background and key design points of MemoryRegionMap.
//
// MemoryRegionMap is a low-level module with quite atypical requirements that
// result in some degree of non-triviality of the implementation and design.
//
// MemoryRegionMap collects info about *all* memory regions created with
// mmap, munmap, mremap, sbrk.
// The key word above is 'all': all regions created during the process's
// lifetime, frequently starting even before global object constructors
// execute.
//
// This is needed by the primary client of MemoryRegionMap:
// HeapLeakChecker uses the regions and the associated stack traces
// to figure out what part of the memory is the heap:
// if MemoryRegionMap were to miss some (early) regions, leak checking would
// stop working correctly.
//
// To accomplish the goal of functioning before/during global object
// constructor execution, MemoryRegionMap is implemented as a singleton
// service that relies on its own on-demand-initialized, constructor-less
// static data, and only relies on other low-level modules that can also
// function properly even before global object constructors run.
//
// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
// sbrk occurrences is more involved: conceptually, one needs to
// record some bits of data about every mmap or sbrk call,
// but to do that one needs to allocate memory for that data at some point,
// and all memory allocations themselves ultimately come from an mmap
// or sbrk call (that's how the address space of the process grows).
//
// Also note that we need to do all the above recording from
// within an mmap/sbrk hook, which is sometimes/frequently made by a memory
// allocator, including the allocator MemoryRegionMap itself must rely on.
// In the case of heap-checker usage this includes even the very first
// mmap/sbrk call happening in the program: heap-checker gets activated due to
// a link-time installed mmap/sbrk hook and it initializes MemoryRegionMap
// and asks it to record info about this very first call right from that
// very first hook invocation.
//
// MemoryRegionMap does its memory allocations via LowLevelAlloc:
// unlike a more complex standard memory allocator, LowLevelAlloc cooperates
// with MemoryRegionMap by not holding any of its own locks while it calls mmap
// to get memory, thus we are able to call LowLevelAlloc from
// our mmap/sbrk hooks without causing a deadlock in it.
// For the same reason of deadlock prevention, the locking in MemoryRegionMap
// itself is write-recursive, which is an exception to Google's mutex usage
// rules.
//
// We still need to break the infinite cycle of mmap calling our hook,
// which asks LowLevelAlloc for memory to record this mmap,
// which (sometimes) causes mmap, which calls our hook, and so on.
// We do this as follows: on a recursive call of MemoryRegionMap's
// mmap/sbrk/mremap hook we record the data about the allocation in a
// static fixed-sized stack (saved_regions and saved_buckets); when the
// recursion unwinds, but before returning from the outer hook call, we unwind
// this stack and move the data from saved_regions and saved_buckets to its
// permanent place in the RegionSet and "bucket_table" respectively,
// which can cause more allocations and mmap-s and recursion and unwinding,
// but the whole process ends eventually because, for the small allocations
// we make, LowLevelAlloc reuses one mmap call and parcels out
// the memory it created to satisfy several of our allocation requests.
//

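// To make the recursion-breaking scheme above concrete, here is a minimal,
// hypothetical sketch of the "buffer during recursion, flush on unwind"
// pattern that MemoryRegionMap follows (the names and the simplified
// single-hook structure below are illustrative only, not the actual code
// in this file):
//
//   static Region saved[20];        // fixed-size, constructor-less buffer
//   static int saved_count = 0;
//   static bool recursive = false;  // are we already inside the outer hook?
//
//   void OnMmap(const Region& r) {
//     if (recursive) {              // inner (recursive) hook invocation:
//       saved[saved_count++] = r;   //   just stash the data, no allocation
//       return;
//     }
//     recursive = true;
//     InsertIntoPermanentSet(r);    // may mmap -> re-enters OnMmap() above
//     while (saved_count > 0)       // flush whatever the recursion stashed;
//       InsertIntoPermanentSet(saved[--saved_count]);  // may stash more,
//     recursive = false;            // but the loop drains it all
//   }
//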
// ========================================================================= //

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#elif !defined(MAP_FAILED)
#define MAP_FAILED -1  // the only thing we need from mman.h
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>   // for pthread_t, pthread_self()
#endif
#include <stddef.h>

#include <algorithm>
#include <set>

#include "memory_region_map.h"

#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"

#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>

// MREMAP_FIXED is a Linux extension.  The way it's used in this file,
// setting it to 0 is equivalent to saying "this feature isn't
// supported", which is correct.
#ifndef MREMAP_FIXED
# define MREMAP_FIXED  0
#endif

using std::max;

// ========================================================================= //

int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
SpinLock MemoryRegionMap::owner_lock_(  // ACQUIRED_AFTER(lock_)
    SpinLock::LINKER_INITIALIZED);
int MemoryRegionMap::recursion_count_ = 0;  // GUARDED_BY(owner_lock_)
pthread_t MemoryRegionMap::lock_owner_tid_;  // GUARDED_BY(owner_lock_)
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;
HeapProfileBucket** MemoryRegionMap::bucket_table_ = NULL;  // GUARDED_BY(lock_)
int MemoryRegionMap::num_buckets_ = 0;  // GUARDED_BY(lock_)
int MemoryRegionMap::saved_buckets_count_ = 0;  // GUARDED_BY(lock_)
HeapProfileBucket MemoryRegionMap::saved_buckets_[20];  // GUARDED_BY(lock_)

// GUARDED_BY(lock_)
const void* MemoryRegionMap::saved_buckets_keys_[20][kMaxStackDepth];

// ========================================================================= //

// Simple hook into execution of global object constructors,
// so that we do not call pthread_self() when it does not yet work.
static bool libpthread_initialized = false;
static bool initializer = (libpthread_initialized = true, true);

static inline bool current_thread_is(pthread_t should_be) {
  // Before main() runs, there's only one thread, so we're always that thread
  if (!libpthread_initialized) return true;
  // this starts working only sometime well into global constructor execution:
  return pthread_equal(pthread_self(), should_be);
}

// ========================================================================= //

// Constructor-less place-holder to store a RegionSet in.
union MemoryRegionMap::RegionSetRep {
  char rep[sizeof(RegionSet)];
  void* align_it;  // do not need a better alignment for 'rep' than this
  RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};

// The bytes where MemoryRegionMap::regions_ will point to.
// We use RegionSetRep with noop c-tor so that global construction
// does not interfere.
static MemoryRegionMap::RegionSetRep regions_rep;
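
// For readers unfamiliar with the idiom above: regions_rep just reserves raw,
// suitably aligned bytes, and the RegionSet is later created in those bytes
// with placement new (see InitRegionSetLocked below), so no global
// constructor ever has to run for it.  A minimal stand-alone sketch of the
// same constructor-less-placeholder idiom (the names here are illustrative
// only, not the ones used in this file):
//
//   #include <new>
//   #include <set>
//
//   union IntSetRep {
//     char rep[sizeof(std::set<int>)];
//     void* align_it;                 // pointer alignment is enough here
//     std::set<int>* get() { return reinterpret_cast<std::set<int>*>(rep); }
//   };
//   static IntSetRep int_set_rep;     // zero bytes at load time, no c-tor
//
//   static std::set<int>* LazyIntSet() {
//     static bool constructed = false;
//     if (!constructed) {
//       new (int_set_rep.rep) std::set<int>();   // construct on first use
//       constructed = true;
//     }
//     return int_set_rep.get();
//   }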

// ========================================================================= //

// Has InsertRegionLocked been called recursively
// (or rather should we *not* use regions_ to record a hooked mmap).
static bool recursive_insert = false;

void MemoryRegionMap::Init(int max_stack_depth, bool use_buckets) {
  RAW_VLOG(10, "MemoryRegionMap Init");
  RAW_CHECK(max_stack_depth >= 0, "");
  // Make sure we don't overflow the memory in region stacks:
  RAW_CHECK(max_stack_depth <= kMaxStackDepth,
            "need to increase kMaxStackDepth?");
  Lock();
  client_count_ += 1;
  max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
  if (client_count_ > 1) {
    // not first client: already did initialization-proper
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Init increment done");
    return;
  }
  // Set our hooks and make sure they were installed:
  RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
  // We need to set recursive_insert since the NewArena call itself
  // will already do some allocations with mmap which our hooks will catch;
  // recursive_insert allows us to buffer info about these mmap calls.
  // Note that Init() can be (and is) sometimes called
  // already from within an mmap/sbrk hook.
  recursive_insert = true;
  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  recursive_insert = false;
  HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
    // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
    // recursive_insert = false; as InsertRegionLocked will also construct
    // regions_ on demand for us.
  if (use_buckets) {
    const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
    recursive_insert = true;
    bucket_table_ = static_cast<HeapProfileBucket**>(
        MyAllocator::Allocate(table_bytes));
    recursive_insert = false;
    memset(bucket_table_, 0, table_bytes);
    num_buckets_ = 0;
  }
  if (regions_ == NULL)  // init regions_
    InitRegionSetLocked();
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Init done");
}

bool MemoryRegionMap::Shutdown() {
  RAW_VLOG(10, "MemoryRegionMap Shutdown");
  Lock();
  RAW_CHECK(client_count_ > 0, "");
  client_count_ -= 1;
  if (client_count_ != 0) {  // not last client; need not really shutdown
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
    return true;
  }
  if (bucket_table_ != NULL) {
    for (int i = 0; i < kHashTableSize; i++) {
      for (HeapProfileBucket* curr = bucket_table_[i]; curr != 0; /**/) {
        HeapProfileBucket* bucket = curr;
        curr = curr->next;
        MyAllocator::Free(bucket->stack, 0);
        MyAllocator::Free(bucket, 0);
      }
    }
    MyAllocator::Free(bucket_table_, 0);
    num_buckets_ = 0;
    bucket_table_ = NULL;
  }
  RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  if (regions_) regions_->~RegionSet();
  regions_ = NULL;
  bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
  if (deleted_arena) {
    arena_ = 0;
  } else {
    RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
  }
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Shutdown done");
  return deleted_arena;
}

bool MemoryRegionMap::IsRecordingLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  return client_count_ > 0;
}

// Invariants (once libpthread_initialized is true):
//   * While lock_ is not held, recursion_count_ is 0 (and
//     lock_owner_tid_ is the previous owner, but we don't rely on
//     that).
//   * recursion_count_ and lock_owner_tid_ are only written while
//     both lock_ and owner_lock_ are held. They may be read under
//     just owner_lock_.
//   * At entry and exit of Lock() and Unlock(), the current thread
//     owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
//     && recursion_count_ > 0.
void MemoryRegionMap::Lock() {
  {
    SpinLockHolder l(&owner_lock_);
    if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
      RAW_CHECK(lock_.IsHeld(), "Invariants violated");
      recursion_count_++;
      RAW_CHECK(recursion_count_ <= 5,
                "recursive lock nesting unexpectedly deep");
      return;
    }
  }
  lock_.Lock();
  {
    SpinLockHolder l(&owner_lock_);
    RAW_CHECK(recursion_count_ == 0,
              "Last Unlock didn't reset recursion_count_");
    if (libpthread_initialized)
      lock_owner_tid_ = pthread_self();
    recursion_count_ = 1;
  }
}

void MemoryRegionMap::Unlock() {
  SpinLockHolder l(&owner_lock_);
  RAW_CHECK(recursion_count_ >  0, "unlock when not held");
  RAW_CHECK(lock_.IsHeld(),
            "unlock when not held, and recursion_count_ is wrong");
  RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
  recursion_count_--;
  if (recursion_count_ == 0) {
    lock_.Unlock();
  }
}

bool MemoryRegionMap::LockIsHeld() {
  SpinLockHolder l(&owner_lock_);
  return lock_.IsHeld()  &&  current_thread_is(lock_owner_tid_);
}
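
// Example of why Lock() must be recursion-aware (a typical call chain,
// simplified): RecordRegionAddition() takes Lock(); InsertRegionLocked()
// then asks LowLevelAlloc for memory; that allocation may itself mmap,
// which fires MmapHook -> RecordRegionAddition() -> Lock() again on the
// same thread.  The owner_lock_/recursion_count_ bookkeeping above turns
// that second Lock() into a recursion-count increment instead of a
// self-deadlock on lock_.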

const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (regions_ != NULL) {
    Region sample;
    sample.SetRegionSetKey(addr);
    RegionSet::iterator region = regions_->lower_bound(sample);
    if (region != regions_->end()) {
      RAW_CHECK(addr <= region->end_addr, "");
      if (region->start_addr <= addr  &&  addr < region->end_addr) {
        return &(*region);
      }
    }
  }
  return NULL;
}

bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(addr);
  if (region != NULL) *result = *region;  // create it as an independent copy
  Unlock();
  return region != NULL;
}

bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
                                             Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(stack_top);
  if (region != NULL) {
    RAW_VLOG(10, "Stack at %p is inside region %p..%p",
                reinterpret_cast<void*>(stack_top),
                reinterpret_cast<void*>(region->start_addr),
                reinterpret_cast<void*>(region->end_addr));
    const_cast<Region*>(region)->set_is_stack();  // now we know
      // cast is safe (set_is_stack does not change the set ordering key)
    *result = *region;  // create *result as an independent copy
  }
  Unlock();
  return region != NULL;
}
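
// A minimal usage sketch for the lookup interface above, from the point of
// view of a hypothetical client (illustrative only; Process() and
// some_address are not defined anywhere in this library):
//
//   MemoryRegionMap::Init(/*max_stack_depth=*/20, /*use_buckets=*/false);
//   ...
//   MemoryRegionMap::Region r;
//   if (MemoryRegionMap::FindRegion(some_address, &r)) {
//     // r is an independent copy, safe to use after the internal lock
//     // has been released:
//     Process(r.start_addr, r.end_addr, r.call_stack_depth);
//   }
//   ...
//   MemoryRegionMap::Shutdown();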

HeapProfileBucket* MemoryRegionMap::GetBucket(int depth,
                                              const void* const key[]) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // Make hash-value
  uintptr_t hash = 0;
  for (int i = 0; i < depth; i++) {
    hash += reinterpret_cast<uintptr_t>(key[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
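  // (The add/shift/xor mixing above follows the pattern of the well-known
  // Jenkins "one-at-a-time" hash, applied to the stack-frame addresses.)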

  // Lookup stack trace in table
  unsigned int hash_index = (static_cast<unsigned int>(hash)) % kHashTableSize;
  for (HeapProfileBucket* bucket = bucket_table_[hash_index];
       bucket != 0;
       bucket = bucket->next) {
    if ((bucket->hash == hash) && (bucket->depth == depth) &&
        std::equal(key, key + depth, bucket->stack)) {
      return bucket;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  HeapProfileBucket* bucket;
  if (recursive_insert) {  // recursion: save in saved_buckets_
    const void** key_copy = saved_buckets_keys_[saved_buckets_count_];
    std::copy(key, key + depth, key_copy);
    bucket = &saved_buckets_[saved_buckets_count_];
    memset(bucket, 0, sizeof(*bucket));
    ++saved_buckets_count_;
    bucket->stack = key_copy;
    bucket->next  = NULL;
  } else {
    recursive_insert = true;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    recursive_insert = false;
    std::copy(key, key + depth, key_copy);
    recursive_insert = true;
    bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    recursive_insert = false;
    memset(bucket, 0, sizeof(*bucket));
    bucket->stack = key_copy;
    bucket->next  = bucket_table_[hash_index];
  }
  bucket->hash = hash;
  bucket->depth = depth;
  bucket_table_[hash_index] = bucket;
  ++num_buckets_;
  return bucket;
}

MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->begin();
}

MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->end();
}

inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
  RAW_VLOG(12, "Inserting region %p..%p from %p",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr),
              reinterpret_cast<void*>(region.caller()));
  RegionSet::const_iterator i = regions_->lower_bound(region);
  if (i != regions_->end() && i->start_addr <= region.start_addr) {
    RAW_DCHECK(region.end_addr <= i->end_addr, "");  // lower_bound ensures this
    return;  // 'region' is a subset of an already recorded region; do nothing
    // We can be stricter and allow this only when *i has been created via
    // an mmap call with the MAP_NORESERVE flag set.
  }
  if (DEBUG_MODE) {
    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
              "Wow, overlapping memory regions");
    Region sample;
    sample.SetRegionSetKey(region.start_addr);
    i = regions_->lower_bound(sample);
    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
              "Wow, overlapping memory regions");
  }
  region.AssertIsConsistent();  // just making sure
  // This inserts and allocates permanent storage for region
  // and its call stack data: it's safe to do it now:
  regions_->insert(region);
  RAW_VLOG(12, "Inserted region %p..%p :",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr));
  if (VLOG_IS_ON(12))  LogAllLocked();
}

// These variables are local to MemoryRegionMap::InsertRegionLocked()
// and MemoryRegionMap::HandleSavedRegionsLocked()
// and are file-level to ensure that they are initialized at load time.

// Number of unprocessed region inserts.
static int saved_regions_count = 0;

// Unprocessed inserts (must be big enough to hold all allocations that can
// be caused by an InsertRegionLocked call).
// Region has no constructor, so that c-tor execution does not interfere
// with any-time use of the static memory behind saved_regions.
static MemoryRegionMap::Region saved_regions[20];

inline void MemoryRegionMap::HandleSavedRegionsLocked(
              void (*insert_func)(const Region& region)) {
  while (saved_regions_count > 0) {
    // Making a local-var copy of the region argument to insert_func
    // including its stack (w/o doing any memory allocations) is important:
    // in many cases the memory in saved_regions
    // will get written-to during the (*insert_func)(r) call below.
    Region r = saved_regions[--saved_regions_count];
    (*insert_func)(r);
  }
}

void MemoryRegionMap::RestoreSavedBucketsLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  while (saved_buckets_count_ > 0) {
    HeapProfileBucket bucket = saved_buckets_[--saved_buckets_count_];
    unsigned int hash_index =
        static_cast<unsigned int>(bucket.hash) % kHashTableSize;
    bool is_found = false;
    for (HeapProfileBucket* curr = bucket_table_[hash_index];
         curr != 0;
         curr = curr->next) {
      if ((curr->hash == bucket.hash) && (curr->depth == bucket.depth) &&
          std::equal(bucket.stack, bucket.stack + bucket.depth, curr->stack)) {
        curr->allocs += bucket.allocs;
        curr->alloc_size += bucket.alloc_size;
        curr->frees += bucket.frees;
        curr->free_size += bucket.free_size;
        is_found = true;
        break;
      }
    }
    if (is_found) continue;

    const size_t key_size = sizeof(bucket.stack[0]) * bucket.depth;
    const void** key_copy = static_cast<const void**>(
        MyAllocator::Allocate(key_size));
    std::copy(bucket.stack, bucket.stack + bucket.depth, key_copy);
    HeapProfileBucket* new_bucket = static_cast<HeapProfileBucket*>(
        MyAllocator::Allocate(sizeof(HeapProfileBucket)));
    memset(new_bucket, 0, sizeof(*new_bucket));
    new_bucket->hash = bucket.hash;
    new_bucket->depth = bucket.depth;
    new_bucket->stack = key_copy;
    new_bucket->next = bucket_table_[hash_index];
    bucket_table_[hash_index] = new_bucket;
    ++num_buckets_;
  }
}

inline void MemoryRegionMap::InitRegionSetLocked() {
  RAW_VLOG(12, "Initializing region set");
  regions_ = regions_rep.region_set();
  recursive_insert = true;
  new(regions_) RegionSet();
  HandleSavedRegionsLocked(&DoInsertRegionLocked);
  recursive_insert = false;
}

inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // We can be called recursively, because RegionSet constructor
  // and DoInsertRegionLocked() (called below) can call the allocator.
  // recursive_insert tells us if that's the case. When this happens,
  // region insertion information is recorded in saved_regions[],
  // and taken into account when the recursion unwinds.
  // Do the insert:
  if (recursive_insert) {  // recursion: save in saved_regions
    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
                reinterpret_cast<void*>(region.start_addr),
                reinterpret_cast<void*>(region.end_addr),
                reinterpret_cast<void*>(region.caller()));
    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
    // Copy 'region' to saved_regions[saved_regions_count]
    // together with the contents of its call_stack,
    // then increment saved_regions_count.
    saved_regions[saved_regions_count++] = region;
  } else {  // not a recursive call
    if (regions_ == NULL)  // init regions_
      InitRegionSetLocked();
    recursive_insert = true;
    // Do the actual insertion work to put new regions into regions_:
    DoInsertRegionLocked(region);
    HandleSavedRegionsLocked(&DoInsertRegionLocked);
    recursive_insert = false;
  }
}

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 1;
#else
static const int kStripFrames = 3;
#endif

void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
  // Record start/end info about this memory acquisition call in a new region:
  Region region;
  region.Create(start, size);
  // First get the call stack info into the local variable 'region':
  const int depth =
    max_stack_depth_ > 0
    ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
                                      max_stack_depth_, kStripFrames + 1)
    : 0;
  region.set_call_stack_depth(depth);  // record stack info fully
  RAW_VLOG(10, "New global region %p..%p from %p",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr),
              reinterpret_cast<void*>(region.caller()));
  // Note: none of the above allocates memory.
  Lock();  // recursively lock
  map_size_ += size;
  InsertRegionLocked(region);
    // This will (eventually) allocate storage for and copy over the stack data
    // from region.call_stack_data_ that is pointed to by region.call_stack().
  if (bucket_table_ != NULL) {
    HeapProfileBucket* b = GetBucket(depth, region.call_stack);
    ++b->allocs;
    b->alloc_size += size;
    if (!recursive_insert) {
      recursive_insert = true;
      RestoreSavedBucketsLocked();
      recursive_insert = false;
    }
  }
  Unlock();
}

void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
  Lock();
  if (recursive_insert) {
    // First remove the removed region from saved_regions, if it's
    // there, to prevent overrunning saved_regions in recursive
    // map/unmap call sequences, and also to prevent later inserting
    // regions which have already been unmapped.
    uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
    uintptr_t end_addr = start_addr + size;
    int put_pos = 0;
    int old_count = saved_regions_count;
    for (int i = 0; i < old_count; ++i, ++put_pos) {
      Region& r = saved_regions[i];
      if (r.start_addr == start_addr && r.end_addr == end_addr) {
        // An exact match, so it's safe to remove.
        RecordRegionRemovalInBucket(r.call_stack_depth, r.call_stack, size);
        --saved_regions_count;
        --put_pos;
        RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
                     "now have %d saved regions"),
                 reinterpret_cast<void*>(start_addr),
                 reinterpret_cast<void*>(end_addr),
                 saved_regions_count);
      } else {
        if (put_pos < i) {
          saved_regions[put_pos] = saved_regions[i];
        }
      }
    }
  }
  if (regions_ == NULL) {  // We must have just unset the hooks,
                           // but this thread was already inside the hook.
    Unlock();
    return;
  }
  if (!recursive_insert) {
    HandleSavedRegionsLocked(&InsertRegionLocked);
  }
    // first handle adding saved regions if any
  uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
  uintptr_t end_addr = start_addr + size;
  // subtract start_addr, end_addr from all the regions
  RAW_VLOG(10, "Removing global region %p..%p; have %" PRIuS " regions",
              reinterpret_cast<void*>(start_addr),
              reinterpret_cast<void*>(end_addr),
              regions_->size());
  Region sample;
  sample.SetRegionSetKey(start_addr);
  // Only iterate over the regions that might overlap start_addr..end_addr:
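  // The loop below distinguishes four ways a recorded region R can overlap
  // the removed range D = [start_addr, end_addr); roughly:
  //   1) D covers all of R         -> erase R entirely
  //   2) D lies strictly inside R  -> split R into a head part and a tail part
  //   3) D covers the front of R   -> move R's start_addr up to end_addr
  //   4) D covers the back of R    -> replace R by a copy whose end_addr is
  //      start_addr (an erase + re-insert, since end_addr is the sorting key)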
  for (RegionSet::iterator region = regions_->lower_bound(sample);
       region != regions_->end()  &&  region->start_addr < end_addr;
       /*noop*/) {
    RAW_VLOG(13, "Looking at region %p..%p",
                reinterpret_cast<void*>(region->start_addr),
                reinterpret_cast<void*>(region->end_addr));
    if (start_addr <= region->start_addr  &&
        region->end_addr <= end_addr) {  // full deletion
      RAW_VLOG(12, "Deleting region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - region->start_addr);
      RegionSet::iterator d = region;
      ++region;
      regions_->erase(d);
      continue;
    } else if (region->start_addr < start_addr  &&
               end_addr < region->end_addr) {  // cutting-out split
      RAW_VLOG(12, "Splitting region %p..%p in two",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - start_addr);
      // Make another region for the start portion:
      // The new region has to be the start portion because we can't
      // just modify region->end_addr as it's the sorting key.
      Region r = *region;
      r.set_end_addr(start_addr);
      InsertRegionLocked(r);
      // cut *region from start:
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (end_addr > region->start_addr  &&
               start_addr <= region->start_addr) {  // cut from start
      RAW_VLOG(12, "Start-chopping region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  end_addr - region->start_addr);
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (start_addr > region->start_addr  &&
               start_addr < region->end_addr) {  // cut from end
      RAW_VLOG(12, "End-chopping region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      RecordRegionRemovalInBucket(region->call_stack_depth, region->call_stack,
                                  region->end_addr - start_addr);
      // Can't just modify region->end_addr (it's the sorting key):
      Region r = *region;
      r.set_end_addr(start_addr);
      RegionSet::iterator d = region;
      ++region;
      // It's safe to erase before inserting since r is independent of *d:
      // r contains its own copy of the call stack:
      regions_->erase(d);
      InsertRegionLocked(r);
      continue;
    }
    ++region;
  }
  RAW_VLOG(12, "Removed region %p..%p; have %" PRIuS " regions",
              reinterpret_cast<void*>(start_addr),
              reinterpret_cast<void*>(end_addr),
              regions_->size());
  if (VLOG_IS_ON(12))  LogAllLocked();
  unmap_size_ += size;
  Unlock();
}

void MemoryRegionMap::RecordRegionRemovalInBucket(int depth,
                                                  const void* const stack[],
                                                  size_t size) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (bucket_table_ == NULL) return;
  HeapProfileBucket* b = GetBucket(depth, stack);
  ++b->frees;
  b->free_size += size;
}

void MemoryRegionMap::MmapHook(const void* result,
                               const void* start, size_t size,
                               int prot, int flags,
                               int fd, off_t offset) {
  // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe
  // snprintf reimplementation that does not malloc to pretty-print NULL
  RAW_VLOG(10, "MMap = 0x%" PRIxPTR " of %" PRIuS " at %" PRIu64 " "
              "prot %d flags %d fd %d offs %" PRId64,
              reinterpret_cast<uintptr_t>(result), size,
              reinterpret_cast<uint64>(start), prot, flags, fd,
              static_cast<int64>(offset));
  if (result != reinterpret_cast<void*>(MAP_FAILED)  &&  size != 0) {
    RecordRegionAddition(result, size);
  }
}

void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
  RAW_VLOG(10, "MUnmap of %p %" PRIuS, ptr, size);
  if (size != 0) {
    RecordRegionRemoval(ptr, size);
  }
}

void MemoryRegionMap::MremapHook(const void* result,
                                 const void* old_addr, size_t old_size,
                                 size_t new_size, int flags,
                                 const void* new_addr) {
  RAW_VLOG(10, "MRemap = 0x%" PRIxPTR " of 0x%" PRIxPTR " %" PRIuS " "
              "to %" PRIuS " flags %d new_addr=0x%" PRIxPTR,
              (uintptr_t)result, (uintptr_t)old_addr,
               old_size, new_size, flags,
               flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
  if (result != reinterpret_cast<void*>(-1)) {
    RecordRegionRemoval(old_addr, old_size);
    RecordRegionAddition(result, new_size);
  }
}

extern "C" void* __sbrk(ptrdiff_t increment);  // defined in libc

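// Worked example of the bookkeeping in SbrkHook() below (hypothetical
// numbers): if the program break was at 0x10000 and sbrk(0x2000) succeeds,
// the hook sees result == 0x10000 and sbrk(0) == 0x12000, so it records an
// addition of the 0x2000 bytes [0x10000, 0x12000).  For a negative increment,
// result is the old (higher) break and sbrk(0) is the new one, so the hook
// records a removal of the range [new break, old break).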
void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
  RAW_VLOG(10, "Sbrk = 0x%" PRIxPTR " of %" PRIdS,
           (uintptr_t)result, increment);
  if (result != reinterpret_cast<void*>(-1)) {
    if (increment > 0) {
      void* new_end = sbrk(0);
      RecordRegionAddition(result, reinterpret_cast<uintptr_t>(new_end) -
                                   reinterpret_cast<uintptr_t>(result));
    } else if (increment < 0) {
      void* new_end = sbrk(0);
      RecordRegionRemoval(new_end, reinterpret_cast<uintptr_t>(result) -
                                   reinterpret_cast<uintptr_t>(new_end));
    }
  }
}

void MemoryRegionMap::LogAllLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_LOG(INFO, "List of regions:");
  uintptr_t previous = 0;
  for (RegionSet::const_iterator r = regions_->begin();
       r != regions_->end(); ++r) {
    RAW_LOG(INFO, "Memory region 0x%" PRIxPTR "..0x%" PRIxPTR " "
                  "from 0x%" PRIxPTR " stack=%d",
                  r->start_addr, r->end_addr, r->caller(), r->is_stack);
    RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
      // this must be caused by uncontrolled recursive operations on regions_
    previous = r->end_addr;
  }
  RAW_LOG(INFO, "End of regions list");
}
    817