/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Maxim Lifantsev
 */

//
// Background and key design points of MemoryRegionMap.
//
// MemoryRegionMap is a low-level module with quite atypical requirements that
// result in some degree of non-triviality of the implementation and design.
//
// MemoryRegionMap collects info about *all* memory regions created with
// mmap, munmap, mremap, sbrk.
// The key word above is 'all': all that happen in a process
// during its lifetime, frequently starting even before global object
// constructors execute.
//
// This is needed by the primary client of MemoryRegionMap:
// HeapLeakChecker uses the regions and the associated stack traces
// to figure out what part of the memory is the heap:
// if MemoryRegionMap were to miss some (early) regions, leak checking would
// stop working correctly.
//
// To accomplish the goal of functioning before/during global object
// constructor execution, MemoryRegionMap is implemented as a singleton
// service that relies on its own on-demand initialized, constructor-less
// static data, and only relies on other low-level modules that can also
// function properly even before global object constructors run.
//
// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
// sbrk occurrences is more involved: conceptually, one needs to
// record some bits of data about any mmap or sbrk call,
// but to do that one needs to allocate memory for that data at some point,
// and all memory allocations in the end themselves come from an mmap
// or sbrk call (that's how the address space of the process grows).
//
// Also note that we need to do all the above recording from
// within an mmap/sbrk hook, which is frequently called by a memory
// allocator, including the allocator MemoryRegionMap itself must rely on.
// In the case of heap-checker usage this includes even the very first
// mmap/sbrk call happening in the program: heap-checker gets activated due to
// a link-time installed mmap/sbrk hook, and it initializes MemoryRegionMap
// and asks it to record info about this very first call right from that
// very first hook invocation.
//
// MemoryRegionMap does its memory allocations via LowLevelAlloc:
// unlike a more complex standard memory allocator, LowLevelAlloc cooperates
// with MemoryRegionMap by not holding any of its own locks while it calls
// mmap to get memory, thus we are able to call LowLevelAlloc from
// our mmap/sbrk hooks without causing a deadlock in it.
// For the same reason of deadlock prevention, the locking in MemoryRegionMap
// itself is write-recursive, which is an exception to Google's mutex usage.
//
// We still need to break the infinite cycle of mmap calling our hook,
// which asks LowLevelAlloc for memory to record this mmap,
// which (sometimes) causes mmap, which calls our hook, and so on.
// We do this as follows: on a recursive call of MemoryRegionMap's
// mmap/sbrk/mremap hook we record the data about the allocation in a
// static fixed-sized stack (saved_regions); when the recursion unwinds,
// but before returning from the outer hook call, we unwind this stack and
// move the data from saved_regions to its permanent place in the RegionSet,
// which can cause more allocations and mmap-s and recursion and unwinding,
// but the whole process ends eventually because, for the small allocations
// we are doing, LowLevelAlloc reuses one mmap call and parcels out
// the memory it created to satisfy several of our allocation requests.
//

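// A minimal client-lifecycle sketch (hedged; the depth argument is just an
// example value):
//   MemoryRegionMap::Init(MemoryRegionMap::kMaxStackDepth);
//   ... mmap/munmap/mremap/sbrk activity gets recorded ...
//   MemoryRegionMap::Shutdown();  // the last client's Shutdown removes the
//                                 // hooks and tears down the region set
//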
// ========================================================================= //

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#elif !defined(MAP_FAILED)
#define MAP_FAILED -1  // the only thing we need from mman.h
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>   // for pthread_t, pthread_self()
#endif
#include <stddef.h>

#include <algorithm>
#include <set>

#include "memory_region_map.h"

#include "base/logging.h"
#include "base/low_level_alloc.h"
#include "malloc_hook-inl.h"

#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>

// MREMAP_FIXED is a Linux extension.  Given how it's used in this file,
// setting it to 0 is equivalent to saying "this feature isn't
// supported", which is right.
#ifndef MREMAP_FIXED
# define MREMAP_FIXED  0
#endif

using std::max;

// ========================================================================= //

int MemoryRegionMap::client_count_ = 0;
int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
SpinLock MemoryRegionMap::owner_lock_(  // ACQUIRED_AFTER(lock_)
    SpinLock::LINKER_INITIALIZED);
int MemoryRegionMap::recursion_count_ = 0;  // GUARDED_BY(owner_lock_)
pthread_t MemoryRegionMap::lock_owner_tid_;  // GUARDED_BY(owner_lock_)
int64 MemoryRegionMap::map_size_ = 0;
int64 MemoryRegionMap::unmap_size_ = 0;

// ========================================================================= //

// Simple hook into execution of global object constructors,
// so that we do not call pthread_self() when it does not yet work.
static bool libpthread_initialized = false;
static bool initializer = (libpthread_initialized = true, true);

static inline bool current_thread_is(pthread_t should_be) {
  // Before main() runs, there's only one thread, so we're always that thread
  if (!libpthread_initialized) return true;
  // this starts working only sometime well into global constructor execution:
  return pthread_equal(pthread_self(), should_be);
}

// ========================================================================= //

// Constructor-less place-holder to store a RegionSet in.
union MemoryRegionMap::RegionSetRep {
  char rep[sizeof(RegionSet)];
  void* align_it;  // do not need a better alignment for 'rep' than this
  RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};

// The bytes where MemoryRegionMap::regions_ will point to.
// We use RegionSetRep with noop c-tor so that global construction
// does not interfere.
static MemoryRegionMap::RegionSetRep regions_rep;
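// For reference (a restatement of what InsertRegionLocked below does): the
// set is brought to life lazily with a placement new into these bytes,
// so no global constructor ever has to run:
//   regions_ = regions_rep.region_set();
//   new(regions_) RegionSet();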

// ========================================================================= //

// Has InsertRegionLocked been called recursively
// (or rather should we *not* use regions_ to record a hooked mmap).
static bool recursive_insert = false;

void MemoryRegionMap::Init(int max_stack_depth) {
  RAW_VLOG(10, "MemoryRegionMap Init");
  RAW_CHECK(max_stack_depth >= 0, "");
  // Make sure we don't overflow the memory in region stacks:
  RAW_CHECK(max_stack_depth <= kMaxStackDepth,
            "need to increase kMaxStackDepth?");
  Lock();
  client_count_ += 1;
  max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
  if (client_count_ > 1) {
    // not the first client: already did initialization-proper
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Init increment done");
    return;
  }
  // Set our hooks and make sure they were installed:
  RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
  // We need to set recursive_insert since the NewArena call itself
  // will already do some allocations with mmap which our hooks will catch;
  // recursive_insert allows us to buffer info about these mmap calls.
  // Note that Init() can be (and is) sometimes called
  // already from within an mmap/sbrk hook.
  recursive_insert = true;
  arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  recursive_insert = false;
  HandleSavedRegionsLocked(&InsertRegionLocked);  // flush the buffered ones
    // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
    // recursive_insert = false; as InsertRegionLocked will also construct
    // regions_ on demand for us.
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Init done");
}

bool MemoryRegionMap::Shutdown() {
  RAW_VLOG(10, "MemoryRegionMap Shutdown");
  Lock();
  RAW_CHECK(client_count_ > 0, "");
  client_count_ -= 1;
  if (client_count_ != 0) {  // not the last client; no need to really shut down
    Unlock();
    RAW_VLOG(10, "MemoryRegionMap Shutdown decrement done");
    return true;
  }
  RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
  RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
  RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
  RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  if (regions_) regions_->~RegionSet();
  regions_ = NULL;
  bool deleted_arena = LowLevelAlloc::DeleteArena(arena_);
  if (deleted_arena) {
    arena_ = 0;
  } else {
    RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
  }
  Unlock();
  RAW_VLOG(10, "MemoryRegionMap Shutdown done");
  return deleted_arena;
}

// Invariants (once libpthread_initialized is true):
//   * While lock_ is not held, recursion_count_ is 0 (and
//     lock_owner_tid_ is the previous owner, but we don't rely on
//     that).
//   * recursion_count_ and lock_owner_tid_ are only written while
//     both lock_ and owner_lock_ are held. They may be read under
//     just owner_lock_.
//   * At entry and exit of Lock() and Unlock(), the current thread
//     owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
//     && recursion_count_ > 0.
void MemoryRegionMap::Lock() {
  {
    SpinLockHolder l(&owner_lock_);
    if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
      RAW_CHECK(lock_.IsHeld(), "Invariants violated");
      recursion_count_++;
      RAW_CHECK(recursion_count_ <= 5,
                "recursive lock nesting unexpectedly deep");
      return;
    }
  }
  lock_.Lock();
  {
    SpinLockHolder l(&owner_lock_);
    RAW_CHECK(recursion_count_ == 0,
              "Last Unlock didn't reset recursion_count_");
    if (libpthread_initialized)
      lock_owner_tid_ = pthread_self();
    recursion_count_ = 1;
  }
}

void MemoryRegionMap::Unlock() {
  SpinLockHolder l(&owner_lock_);
  RAW_CHECK(recursion_count_ >  0, "unlock when not held");
  RAW_CHECK(lock_.IsHeld(),
            "unlock when not held, and recursion_count_ is wrong");
  RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
  recursion_count_--;
  if (recursion_count_ == 0) {
    lock_.Unlock();
  }
}
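
// A minimal sketch of the write-recursive locking this supports
// (hypothetical call sequence on one thread):
//   Lock();      // outer hook invocation: recursion_count_ becomes 1
//   Lock();      // nested hook invocation: recursion_count_ becomes 2
//   Unlock();    // recursion_count_ back to 1; lock_ is still held
//   Unlock();    // recursion_count_ reaches 0; lock_ is released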

bool MemoryRegionMap::LockIsHeld() {
  SpinLockHolder l(&owner_lock_);
  return lock_.IsHeld()  &&  current_thread_is(lock_owner_tid_);
}

const MemoryRegionMap::Region*
MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  if (regions_ != NULL) {
    Region sample;
    sample.SetRegionSetKey(addr);
    RegionSet::iterator region = regions_->lower_bound(sample);
    if (region != regions_->end()) {
      RAW_CHECK(addr <= region->end_addr, "");
      if (region->start_addr <= addr  &&  addr < region->end_addr) {
        return &(*region);
      }
    }
  }
  return NULL;
}

bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(addr);
  if (region != NULL) *result = *region;  // create it as an independent copy
  Unlock();
  return region != NULL;
}
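
// A minimal caller sketch (hedged; 'addr' is a hypothetical address):
//   MemoryRegionMap::Region r;
//   if (MemoryRegionMap::FindRegion(addr, &r)) {
//     // r.start_addr <= addr < r.end_addr; r is an independent copy,
//     // so it can be inspected without holding the lock.
//   }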

bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
                                             Region* result) {
  Lock();
  const Region* region = DoFindRegionLocked(stack_top);
  if (region != NULL) {
    RAW_VLOG(10, "Stack at %p is inside region %p..%p",
                reinterpret_cast<void*>(stack_top),
                reinterpret_cast<void*>(region->start_addr),
                reinterpret_cast<void*>(region->end_addr));
    const_cast<Region*>(region)->set_is_stack();  // now we know
      // cast is safe (set_is_stack does not change the set ordering key)
    *result = *region;  // create *result as an independent copy
  }
  Unlock();
  return region != NULL;
}

MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->begin();
}

MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_CHECK(regions_ != NULL, "");
  return regions_->end();
}
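
// A minimal iteration sketch (hedged; assumes at least one region has
// already been recorded, since the accessors check regions_ != NULL).
// The caller must hold the lock for the whole traversal:
//   MemoryRegionMap::Lock();
//   for (MemoryRegionMap::RegionIterator it =
//            MemoryRegionMap::BeginRegionLocked();
//        it != MemoryRegionMap::EndRegionLocked();
//        ++it) {
//     // examine *it; avoid allocating or unmapping memory here
//   }
//   MemoryRegionMap::Unlock();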

inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
  RAW_VLOG(12, "Inserting region %p..%p from %p",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr),
              reinterpret_cast<void*>(region.caller()));
  RegionSet::const_iterator i = regions_->lower_bound(region);
  if (i != regions_->end() && i->start_addr <= region.start_addr) {
    RAW_DCHECK(region.end_addr <= i->end_addr, "");  // lower_bound ensures this
    return;  // 'region' is a subset of an already recorded region; do nothing
    // We can be stricter and allow this only when *i has been created via
    // an mmap with MAP_NORESERVE flag set.
  }
  if (DEBUG_MODE) {
    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
              "Wow, overlapping memory regions");
    Region sample;
    sample.SetRegionSetKey(region.start_addr);
    i = regions_->lower_bound(sample);
    RAW_CHECK(i == regions_->end()  ||  !region.Overlaps(*i),
              "Wow, overlapping memory regions");
  }
  region.AssertIsConsistent();  // just making sure
  // This inserts and allocates permanent storage for region
  // and its call stack data: it's safe to do it now:
  regions_->insert(region);
  RAW_VLOG(12, "Inserted region %p..%p :",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr));
  if (VLOG_IS_ON(12))  LogAllLocked();
}

// These variables are local to MemoryRegionMap::InsertRegionLocked()
// and MemoryRegionMap::HandleSavedRegionsLocked()
// and are file-level to ensure that they are initialized at load time.

// Number of unprocessed region inserts.
static int saved_regions_count = 0;

// Unprocessed inserts (must be big enough to hold all allocations that can
// be caused by an InsertRegionLocked call).
// Region has no constructor, so that c-tor execution does not interfere
// with the any-time use of the static memory behind saved_regions.
static MemoryRegionMap::Region saved_regions[20];

inline void MemoryRegionMap::HandleSavedRegionsLocked(
              void (*insert_func)(const Region& region)) {
  while (saved_regions_count > 0) {
    // Making a local-var copy of the region argument to insert_func
    // including its stack (w/o doing any memory allocations) is important:
    // in many cases the memory in saved_regions
    // will get written-to during the (*insert_func)(r) call below.
    Region r = saved_regions[--saved_regions_count];
    (*insert_func)(r);
  }
}

inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // We can be called recursively, because the RegionSet constructor
  // and DoInsertRegionLocked() (called below) can call the allocator.
  // recursive_insert tells us if that's the case. When this happens,
  // region insertion information is recorded in saved_regions[],
  // and taken into account when the recursion unwinds.
  // Do the insert:
  if (recursive_insert) {  // recursion: save in saved_regions
    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
                reinterpret_cast<void*>(region.start_addr),
                reinterpret_cast<void*>(region.end_addr),
                reinterpret_cast<void*>(region.caller()));
    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
    // Copy 'region' to saved_regions[saved_regions_count]
    // together with the contents of its call_stack,
    // then increment saved_regions_count.
    saved_regions[saved_regions_count++] = region;
  } else {  // not a recursive call
    if (regions_ == NULL) {  // init regions_
      RAW_VLOG(12, "Initializing region set");
      regions_ = regions_rep.region_set();
      recursive_insert = true;
      new(regions_) RegionSet();
      HandleSavedRegionsLocked(&DoInsertRegionLocked);
      recursive_insert = false;
    }
    recursive_insert = true;
    // Do the actual insertion work to put new regions into regions_:
    DoInsertRegionLocked(region);
    HandleSavedRegionsLocked(&DoInsertRegionLocked);
    recursive_insert = false;
  }
}

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 1;
#else
static const int kStripFrames = 3;
#endif

void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
  // Record start/end info about this memory acquisition call in a new region:
  Region region;
  region.Create(start, size);
  // First get the call stack info into the local variable 'region':
  const int depth =
    max_stack_depth_ > 0
    ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
                                      max_stack_depth_, kStripFrames + 1)
    : 0;
  region.set_call_stack_depth(depth);  // record stack info fully
  RAW_VLOG(10, "New global region %p..%p from %p",
              reinterpret_cast<void*>(region.start_addr),
              reinterpret_cast<void*>(region.end_addr),
              reinterpret_cast<void*>(region.caller()));
  // Note: none of the above allocates memory.
  Lock();  // recursively lock
  map_size_ += size;
  InsertRegionLocked(region);
    // This will (eventually) allocate storage for and copy over the stack data
    // from region.call_stack_data_ that is pointed to by region.call_stack().
  Unlock();
}

void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
  Lock();
  if (recursive_insert) {
    // First remove the removed region from saved_regions, if it's
    // there, to prevent overrunning saved_regions in recursive
    // map/unmap call sequences, and also to prevent later inserting
    // regions which have already been unmapped.
    uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
    uintptr_t end_addr = start_addr + size;
    int put_pos = 0;
    int old_count = saved_regions_count;
    for (int i = 0; i < old_count; ++i, ++put_pos) {
      Region& r = saved_regions[i];
      if (r.start_addr == start_addr && r.end_addr == end_addr) {
        // An exact match, so it's safe to remove.
        --saved_regions_count;
        --put_pos;
        RAW_VLOG(10, ("Insta-Removing saved region %p..%p; "
                     "now have %d saved regions"),
                 reinterpret_cast<void*>(start_addr),
                 reinterpret_cast<void*>(end_addr),
                 saved_regions_count);
      } else {
        if (put_pos < i) {
          saved_regions[put_pos] = saved_regions[i];
        }
      }
    }
  }
  if (regions_ == NULL) {  // We must have just unset the hooks,
                           // but this thread was already inside the hook.
    Unlock();
    return;
  }
  // First handle adding saved regions, if any:
  if (!recursive_insert) {
    HandleSavedRegionsLocked(&InsertRegionLocked);
  }
  uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
  uintptr_t end_addr = start_addr + size;
  // subtract start_addr, end_addr from all the regions
  RAW_VLOG(10, "Removing global region %p..%p; have %"PRIuS" regions",
              reinterpret_cast<void*>(start_addr),
              reinterpret_cast<void*>(end_addr),
              regions_->size());
  Region sample;
  sample.SetRegionSetKey(start_addr);
  // Only iterate over the regions that might overlap start_addr..end_addr:
  for (RegionSet::iterator region = regions_->lower_bound(sample);
       region != regions_->end()  &&  region->start_addr < end_addr;
       /*noop*/) {
    RAW_VLOG(13, "Looking at region %p..%p",
                reinterpret_cast<void*>(region->start_addr),
                reinterpret_cast<void*>(region->end_addr));
    if (start_addr <= region->start_addr  &&
        region->end_addr <= end_addr) {  // full deletion
      RAW_VLOG(12, "Deleting region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      RegionSet::iterator d = region;
      ++region;
      regions_->erase(d);
      continue;
    } else if (region->start_addr < start_addr  &&
               end_addr < region->end_addr) {  // cutting-out split
      RAW_VLOG(12, "Splitting region %p..%p in two",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      // Make another region for the start portion:
      // The new region has to be the start portion because we can't
      // just modify region->end_addr as it's the sorting key.
      Region r = *region;
      r.set_end_addr(start_addr);
      InsertRegionLocked(r);
      // cut *region from start:
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (end_addr > region->start_addr  &&
               start_addr <= region->start_addr) {  // cut from start
      RAW_VLOG(12, "Start-chopping region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      const_cast<Region&>(*region).set_start_addr(end_addr);
    } else if (start_addr > region->start_addr  &&
               start_addr < region->end_addr) {  // cut from end
      RAW_VLOG(12, "End-chopping region %p..%p",
                  reinterpret_cast<void*>(region->start_addr),
                  reinterpret_cast<void*>(region->end_addr));
      // Can't just modify region->end_addr (it's the sorting key):
      Region r = *region;
      r.set_end_addr(start_addr);
      RegionSet::iterator d = region;
      ++region;
      // It's safe to erase before inserting since r is independent of *d:
      // r contains its own copy of the call stack:
      regions_->erase(d);
      InsertRegionLocked(r);
      continue;
    }
    ++region;
  }
  RAW_VLOG(12, "Removed region %p..%p; have %"PRIuS" regions",
              reinterpret_cast<void*>(start_addr),
              reinterpret_cast<void*>(end_addr),
              regions_->size());
  if (VLOG_IS_ON(12))  LogAllLocked();
  unmap_size_ += size;
  Unlock();
}
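
// A worked example of the overlap handling above (hedged; addresses are
// made up): removing 0x2000..0x3000 from a recorded region 0x1000..0x4000
// hits the "cutting-out split" case: a copy covering 0x1000..0x2000 is
// re-inserted, and the original entry is trimmed in place to 0x3000..0x4000.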

void MemoryRegionMap::MmapHook(const void* result,
                               const void* start, size_t size,
                               int prot, int flags,
                               int fd, off_t offset) {
  // TODO(maxim): replace all 0x%"PRIxS" by %p when RAW_VLOG uses a safe
  // snprintf reimplementation that does not malloc to pretty-print NULL
  RAW_VLOG(10, "MMap = 0x%"PRIxPTR" of %"PRIuS" at %"PRIu64" "
              "prot %d flags %d fd %d offs %"PRId64,
              reinterpret_cast<uintptr_t>(result), size,
              reinterpret_cast<uint64>(start), prot, flags, fd,
              static_cast<int64>(offset));
  if (result != reinterpret_cast<void*>(MAP_FAILED)  &&  size != 0) {
    RecordRegionAddition(result, size);
  }
}

void MemoryRegionMap::MunmapHook(const void* ptr, size_t size) {
  RAW_VLOG(10, "MUnmap of %p %"PRIuS"", ptr, size);
  if (size != 0) {
    RecordRegionRemoval(ptr, size);
  }
}

void MemoryRegionMap::MremapHook(const void* result,
                                 const void* old_addr, size_t old_size,
                                 size_t new_size, int flags,
                                 const void* new_addr) {
  RAW_VLOG(10, "MRemap = 0x%"PRIxPTR" of 0x%"PRIxPTR" %"PRIuS" "
              "to %"PRIuS" flags %d new_addr=0x%"PRIxPTR,
              (uintptr_t)result, (uintptr_t)old_addr,
               old_size, new_size, flags,
               flags & MREMAP_FIXED ? (uintptr_t)new_addr : 0);
  if (result != reinterpret_cast<void*>(-1)) {
    RecordRegionRemoval(old_addr, old_size);
    RecordRegionAddition(result, new_size);
  }
}

extern "C" void* __sbrk(ptrdiff_t increment);  // defined in libc

void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
  RAW_VLOG(10, "Sbrk = 0x%"PRIxPTR" of %"PRIdS"", (uintptr_t)result, increment);
  if (result != reinterpret_cast<void*>(-1)) {
    if (increment > 0) {
      void* new_end = sbrk(0);
      RecordRegionAddition(result, reinterpret_cast<uintptr_t>(new_end) -
                                   reinterpret_cast<uintptr_t>(result));
    } else if (increment < 0) {
      void* new_end = sbrk(0);
      RecordRegionRemoval(new_end, reinterpret_cast<uintptr_t>(result) -
                                   reinterpret_cast<uintptr_t>(new_end));
    }
  }
}
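
// Worked example (hedged; addresses are made up): sbrk returns the *old*
// break, so if the break was at 0x1000 and sbrk(+0x200) returned 0x1000,
// sbrk(0) now returns 0x1200 and the code above records the addition of
// region 0x1000..0x1200. A shrinking sbrk(-0x200) from there likewise
// records the removal of that same region.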

void MemoryRegionMap::LogAllLocked() {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  RAW_LOG(INFO, "List of regions:");
  uintptr_t previous = 0;
  for (RegionSet::const_iterator r = regions_->begin();
       r != regions_->end(); ++r) {
    RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" "
                  "from 0x%"PRIxPTR" stack=%d",
                  r->start_addr, r->end_addr, r->caller(), r->is_stack);
    RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
      // this must be caused by uncontrolled recursive operations on regions_
    previous = r->end_addr;
  }
  RAW_LOG(INFO, "End of regions list");
}
    653