      1 //--------------------------------------------------------------------*/
      2 //--- Massif: a heap profiling tool.                     ms_main.c ---*/
      3 //--------------------------------------------------------------------*/
      4 
      5 /*
      6    This file is part of Massif, a Valgrind tool for profiling memory
      7    usage of programs.
      8 
      9    Copyright (C) 2003-2012 Nicholas Nethercote
     10       njn (at) valgrind.org
     11 
     12    This program is free software; you can redistribute it and/or
     13    modify it under the terms of the GNU General Public License as
     14    published by the Free Software Foundation; either version 2 of the
     15    License, or (at your option) any later version.
     16 
     17    This program is distributed in the hope that it will be useful, but
     18    WITHOUT ANY WARRANTY; without even the implied warranty of
     19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     20    General Public License for more details.
     21 
     22    You should have received a copy of the GNU General Public License
     23    along with this program; if not, write to the Free Software
     24    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     25    02111-1307, USA.
     26 
     27    The GNU General Public License is contained in the file COPYING.
     28 */
     29 
     30 //---------------------------------------------------------------------------
     31 // XXX:
     32 //---------------------------------------------------------------------------
     33 // Todo -- nice, but less critical:
     34 // - do a graph-drawing test
     35 // - make file format more generic.  Obstacles:
     36 //   - unit prefixes are not generic
     37 //   - preset column widths for stats are not generic
     38 //   - preset column headers are not generic
     39 //   - "Massif arguments:" line is not generic
     40 // - do snapshots on client requests
     41 //   - (Michael Meeks): have an interactive way to request a dump
     42 //     (callgrind_control-style)
     43 //     - "profile now"
     44 //     - "show me the extra allocations since the last snapshot"
     45 //     - "start/stop logging" (eg. quickly skip boring bits)
     46 // - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
     47 //   Give each graph a title.  (try to do it generically!)
     48 // - allow truncation of long fnnames if the exact line number is
     49 //   identified?  [hmm, could make getting the name of alloc-fns more
     50 //   difficult] [could dump full names to file, truncate in ms_print]
     51 // - make --show-below-main=no work
     52 // - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
     53 //   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
     54 //   m_commandline.c:add_args_from_string() needs to respect single quotes.
      55 // - With --stacks=yes, want to add a stack trace for detailed snapshots so
      56 //   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
      57 //   possibly useful even with --stacks=no? (Andi Yin)
     58 //
     59 // Performance:
     60 // - To run the benchmarks:
     61 //
     62 //     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
     63 //     time valgrind --tool=massif --depth=100 konqueror
     64 //
     65 //   The other benchmarks don't do much allocation, and so give similar speeds
     66 //   to Nulgrind.
     67 //
     68 //   Timing results on 'nevermore' (njn's machine) as of r7013:
     69 //
     70 //     heap      0.53s  ma:12.4s (23.5x, -----)
     71 //     tinycc    0.46s  ma: 4.9s (10.7x, -----)
     72 //     many-xpts 0.08s  ma: 2.0s (25.0x, -----)
     73 //     konqueror 29.6s real  0:21.0s user
     74 //
     75 //   [Introduction of --time-unit=i as the default slowed things down by
     76 //   roughly 0--20%.]
     77 //
     78 // - get_XCon accounts for about 9% of konqueror startup time.  Try
     79 //   keeping XPt children sorted by 'ip' and use binary search in get_XCon.
     80 //   Requires factoring out binary search code from various places into a
     81 //   VG_(bsearch) function.
     82 //
     83 // Todo -- low priority:
     84 // - In each XPt, record both bytes and the number of allocations, and
     85 //   possibly the global number of allocations.
     86 // - (Andy Lin) Give a stack trace on detailed snapshots?
     87 // - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
     88 //   than a certain size!  Because: "linux's malloc allows to set a
     89 //   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
     90 //   be handled directly by the kernel, and are guaranteed to be returned to
     91 //   the system when freed. So we needed to profile only blocks below this
     92 //   limit."
     93 //
     94 // File format working notes:
     95 
     96 #if 0
     97 desc: --heap-admin=foo
     98 cmd: date
     99 time_unit: ms
    100 #-----------
    101 snapshot=0
    102 #-----------
    103 time=0
    104 mem_heap_B=0
    105 mem_heap_admin_B=0
    106 mem_stacks_B=0
    107 heap_tree=empty
    108 #-----------
    109 snapshot=1
    110 #-----------
    111 time=353
    112 mem_heap_B=5
    113 mem_heap_admin_B=0
    114 mem_stacks_B=0
    115 heap_tree=detailed
    116 n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
    117  n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
    118   n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
    119    n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
    120     n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
    121      n1: 5 0x8049821: (within /bin/date)
    122       n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)
    123 
    124 
    125 n_events: n  time(ms)  total(B)    useful-heap(B)  admin-heap(B)  stacks(B)
    126 t_events: B
    127 n 0 0 0 0 0
    128 n 0 0 0 0 0
    129 t1: 5 <string...>
    130  t1: 6 <string...>
    131 
    132 Ideas:
    133 - each snapshot specifies an x-axis value and one or more y-axis values.
    134 - can display the y-axis values separately if you like
    135 - can completely separate connection between snapshots and trees.
    136 
    137 Challenges:
    138 - how to specify and scale/abbreviate units on axes?
    139 - how to combine multiple values into the y-axis?
    140 
     141 --------------------------------------------------------------------------------
         Command:            date
    142 Massif arguments:   --heap-admin=foo
    143 ms_print arguments: massif.out
    144 --------------------------------------------------------------------------------
    145     KB
    146 6.472^                                                       :#
    147      |                                                       :#  ::  .    .
    148      ...
    149      |                                     ::@  :@    :@ :@:::#  ::  :    ::::
     150    0 +-----------------------------------@---@---@-----@--@---#-------------->ms
              0                                                                     713
    151 
    152 Number of snapshots: 50
    153  Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]
     154 --------------------------------------------------------------------------------
           n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
     155 --------------------------------------------------------------------------------
           0              0                0                0             0            0
    156   1            345                5                5             0            0
    157   2            353                5                5             0            0
    158 100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
    159 ->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
    160 #endif
    161 
    162 //---------------------------------------------------------------------------
    163 
    164 #include "pub_tool_basics.h"
    165 #include "pub_tool_vki.h"
    166 #include "pub_tool_aspacemgr.h"
    167 #include "pub_tool_debuginfo.h"
    168 #include "pub_tool_hashtable.h"
    169 #include "pub_tool_libcbase.h"
    170 #include "pub_tool_libcassert.h"
    171 #include "pub_tool_libcfile.h"
    172 #include "pub_tool_libcprint.h"
    173 #include "pub_tool_libcproc.h"
    174 #include "pub_tool_machine.h"
    175 #include "pub_tool_mallocfree.h"
    176 #include "pub_tool_options.h"
    177 #include "pub_tool_replacemalloc.h"
    178 #include "pub_tool_stacktrace.h"
    179 #include "pub_tool_threadstate.h"
    180 #include "pub_tool_tooliface.h"
    181 #include "pub_tool_xarray.h"
    182 #include "pub_tool_clientstate.h"
    183 #include "pub_tool_gdbserver.h"
    184 
    185 #include "valgrind.h"           // For {MALLOC,FREE}LIKE_BLOCK
    186 
    187 //------------------------------------------------------------*/
    188 //--- Overview of operation                                ---*/
    189 //------------------------------------------------------------*/
    190 
    191 // The size of the stacks and heap is tracked.  The heap is tracked in a lot
    192 // of detail, enough to tell how many bytes each line of code is responsible
    193 // for, more or less.  The main data structure is a tree representing the
    194 // call tree beneath all the allocation functions like malloc().
    195 // (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
    196 // the page level, and each page is treated much like a heap block.  We use
    197 // "heap" throughout below to cover this case because the concepts are all the
    198 // same.)
    199 //
    200 // "Snapshots" are recordings of the memory usage.  There are two basic
    201 // kinds:
    202 // - Normal:  these record the current time, total memory size, total heap
    203 //   size, heap admin size and stack size.
    204 // - Detailed: these record those things in a normal snapshot, plus a very
    205 //   detailed XTree (see below) indicating how the heap is structured.
    206 //
    207 // Snapshots are taken every so often.  There are two storage classes of
    208 // snapshots:
    209 // - Temporary:  Massif does a temporary snapshot every so often.  The idea
    210 //   is to always have a certain number of temporary snapshots around.  So
    211 //   we take them frequently to begin with, but decreasingly often as the
    212 //   program continues to run.  Also, we remove some old ones after a while.
    213 //   Overall it's a kind of exponential decay thing.  Most of these are
    214 //   normal snapshots, a small fraction are detailed snapshots.
    215 // - Permanent:  Massif takes a permanent (detailed) snapshot in some
    216 //   circumstances.  They are:
    217 //   - Peak snapshot:  When the memory usage peak is reached, it takes a
    218 //     snapshot.  It keeps this, unless the peak is subsequently exceeded,
    219 //     in which case it will overwrite the peak snapshot.
    220 //   - User-requested snapshots:  These are done in response to client
    221 //     requests.  They are always kept.
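         //
         // As an illustration only (not actual output), the snapshots from a run
         // might look like this on a timeline, using the same one-letter codes
         // that VERB_snapshot uses below ('.' normal, 'd' detailed, 'p' peak):
         //
         //    .........d.........d.........d....p....d.........
         //
         // Most snapshots are normal, every Nth (--detailed-freq) is detailed,
         // and the peak snapshot is detailed and kept until a higher peak
         // replaces it.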
    222 
    223 // Used for printing things when clo_verbosity > 1.
    224 #define VERB(verb, format, args...) \
    225    if (VG_(clo_verbosity) > verb) { \
    226       VG_(dmsg)("Massif: " format, ##args); \
    227    }
    228 
    229 // Used for printing stats when clo_stats == True.
    230 #define STATS(format, args...) \
    231    if (VG_(clo_stats)) { \
    232       VG_(dmsg)("Massif: " format, ##args); \
    233    }
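         // For example, a call site later in this file looks like this:
         //
         //    VERB(2, "Culling...\n");
         //
         // and the stats printed at shutdown (not shown in this excerpt) use
         // STATS in a similar style, eg. something like:
         //
         //    STATS("XPts: %u\n", n_xpts);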
    234 
    235 //------------------------------------------------------------//
    236 //--- Statistics                                           ---//
    237 //------------------------------------------------------------//
    238 
    239 // Konqueror startup, to give an idea of the numbers involved with a biggish
    240 // program, with default depth:
    241 //
    242 //  depth=3                   depth=40
    243 //  - 310,000 allocations
    244 //  - 300,000 frees
    245 //  -  15,000 XPts            800,000 XPts
    246 //  -   1,800 top-XPts
    247 
    248 static UInt n_heap_allocs           = 0;
    249 static UInt n_heap_reallocs         = 0;
    250 static UInt n_heap_frees            = 0;
    251 static UInt n_ignored_heap_allocs   = 0;
    252 static UInt n_ignored_heap_frees    = 0;
    253 static UInt n_ignored_heap_reallocs = 0;
    254 static UInt n_stack_allocs          = 0;
    255 static UInt n_stack_frees           = 0;
    256 static UInt n_xpts                  = 0;
    257 static UInt n_xpt_init_expansions   = 0;
    258 static UInt n_xpt_later_expansions  = 0;
    259 static UInt n_sxpt_allocs           = 0;
    260 static UInt n_sxpt_frees            = 0;
    261 static UInt n_skipped_snapshots     = 0;
    262 static UInt n_real_snapshots        = 0;
    263 static UInt n_detailed_snapshots    = 0;
    264 static UInt n_peak_snapshots        = 0;
    265 static UInt n_cullings              = 0;
    266 static UInt n_XCon_redos            = 0;
    267 
    268 //------------------------------------------------------------//
    269 //--- Globals                                              ---//
    270 //------------------------------------------------------------//
    271 
    272 // Number of guest instructions executed so far.  Only used with
    273 // --time-unit=i.
    274 static Long guest_instrs_executed = 0;
    275 
    276 static SizeT heap_szB       = 0; // Live heap size
    277 static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
    278 static SizeT stacks_szB     = 0; // Live stacks size
    279 
    280 // This is the total size from the current peak snapshot, or 0 if no peak
    281 // snapshot has been taken yet.
    282 static SizeT peak_snapshot_total_szB = 0;
    283 
    284 // Incremented every time memory is allocated/deallocated, by the
    285 // allocated/deallocated amount;  includes heap, heap-admin and stack
    286 // memory.  An alternative to milliseconds as a unit of program "time".
    287 static ULong total_allocs_deallocs_szB = 0;
    288 
    289 // When running with --heap=yes --pages-as-heap=no, we don't start taking
    290 // snapshots until the first basic block is executed, rather than doing it in
    291 // ms_post_clo_init (which is the obvious spot), for two reasons.
    292 // - It lets us ignore stack events prior to that, because they're not
    293 //   really proper ones and just would screw things up.
    294 // - Because there's still some core initialisation to do, and so there
    295 //   would be an artificial time gap between the first and second snapshots.
    296 //
    297 // When running with --heap=yes --pages-as-heap=yes, snapshots start much
    298 // earlier due to new_mem_startup so this isn't relevant.
    299 //
    300 static Bool have_started_executing_code = False;
    301 
    302 //------------------------------------------------------------//
    303 //--- Alloc fns                                            ---//
    304 //------------------------------------------------------------//
    305 
    306 static XArray* alloc_fns;
    307 static XArray* ignore_fns;
    308 
    309 static void init_alloc_fns(void)
    310 {
    311    // Create the list, and add the default elements.
    312    alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
    313                                        VG_(free), sizeof(Char*));
    314    #define DO(x)  { Char* s = x; VG_(addToXA)(alloc_fns, &s); }
    315 
    316    // Ordered roughly according to (presumed) frequency.
    317    // Nb: The C++ "operator new*" ones are overloadable.  We include them
    318    // always anyway, because even if they're overloaded, it would be a
    319    // prodigiously stupid overloading that caused them to not allocate
    320    // memory.
    321    //
    322    // XXX: because we don't look at the first stack entry (unless it's a
    323    // custom allocation) there's not much point to having all these alloc
    324    // functions here -- they should never appear anywhere (I think?) other
    325    // than the top stack entry.  The only exceptions are those that in
    326    // vg_replace_malloc.c are partly or fully implemented in terms of another
    327    // alloc function: realloc (which uses malloc);  valloc,
    328    // malloc_zone_valloc, posix_memalign and memalign_common (which use
    329    // memalign).
    330    //
    331    DO("malloc"                                              );
    332    DO("__builtin_new"                                       );
    333    DO("operator new(unsigned)"                              );
    334    DO("operator new(unsigned long)"                         );
    335    DO("__builtin_vec_new"                                   );
    336    DO("operator new[](unsigned)"                            );
    337    DO("operator new[](unsigned long)"                       );
    338    DO("calloc"                                              );
    339    DO("realloc"                                             );
    340    DO("memalign"                                            );
    341    DO("posix_memalign"                                      );
    342    DO("valloc"                                              );
    343    DO("operator new(unsigned, std::nothrow_t const&)"       );
    344    DO("operator new[](unsigned, std::nothrow_t const&)"     );
    345    DO("operator new(unsigned long, std::nothrow_t const&)"  );
    346    DO("operator new[](unsigned long, std::nothrow_t const&)");
    347 #if defined(VGO_darwin)
    348    DO("malloc_zone_malloc"                                  );
    349    DO("malloc_zone_calloc"                                  );
    350    DO("malloc_zone_realloc"                                 );
    351    DO("malloc_zone_memalign"                                );
    352    DO("malloc_zone_valloc"                                  );
    353 #endif
    354 }
    355 
    356 static void init_ignore_fns(void)
    357 {
    358    // Create the (empty) list.
    359    ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
    360                                         VG_(free), sizeof(Char*));
    361 }
    362 
    363 // Determines if the named function is a member of the XArray.
    364 static Bool is_member_fn(XArray* fns, Char* fnname)
    365 {
    366    Char** fn_ptr;
    367    Int i;
    368 
    369    // Nb: It's a linear search through the list, because we're comparing
    370    // strings rather than pointers to strings.
    371    // Nb: This gets called a lot.  It was an OSet, but they're quite slow to
    372    // iterate through so it wasn't a good choice.
    373    for (i = 0; i < VG_(sizeXA)(fns); i++) {
    374       fn_ptr = VG_(indexXA)(fns, i);
    375       if (VG_STREQ(fnname, *fn_ptr))
    376          return True;
    377    }
    378    return False;
    379 }
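         // (is_member_fn is used below by fn_should_be_ignored, to implement
         // --ignore-fn, and by get_IPs, to implement --alloc-fn filtering.)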
    380 
    381 
    382 //------------------------------------------------------------//
    383 //--- Command line args                                    ---//
    384 //------------------------------------------------------------//
    385 
    386 #define MAX_DEPTH       200
    387 
    388 typedef enum { TimeI, TimeMS, TimeB } TimeUnit;
    389 
    390 static Char* TimeUnit_to_string(TimeUnit time_unit)
    391 {
    392    switch (time_unit) {
    393    case TimeI:  return "i";
    394    case TimeMS: return "ms";
    395    case TimeB:  return "B";
    396    default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
    397    }
    398 }
    399 
    400 static Bool   clo_heap            = True;
    401    // clo_heap_admin is deliberately a word-sized type.  At one point it was
    402    // a UInt, but this caused problems on 64-bit machines when it was
    403    // multiplied by a small negative number and then promoted to a
    404    // word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
    405 static SSizeT clo_heap_admin      = 8;
    406 static Bool   clo_pages_as_heap   = False;
    407 static Bool   clo_stacks          = False;
    408 static Int    clo_depth           = 30;
    409 static double clo_threshold       = 1.0;  // percentage
    410 static double clo_peak_inaccuracy = 1.0;  // percentage
    411 static Int    clo_time_unit       = TimeI;
    412 static Int    clo_detailed_freq   = 10;
    413 static Int    clo_max_snapshots   = 100;
    414 static Char*  clo_massif_out_file = "massif.out.%p";
    415 
    416 static XArray* args_for_massif;
    417 
    418 static Bool ms_process_cmd_line_option(Char* arg)
    419 {
    420    Char* tmp_str;
    421 
    422    // Remember the arg for later use.
    423    VG_(addToXA)(args_for_massif, &arg);
    424 
    425         if VG_BOOL_CLO(arg, "--heap",           clo_heap)   {}
    426    else if VG_BINT_CLO(arg, "--heap-admin",     clo_heap_admin, 0, 1024) {}
    427 
    428    else if VG_BOOL_CLO(arg, "--stacks",         clo_stacks) {}
    429 
    430    else if VG_BOOL_CLO(arg, "--pages-as-heap",  clo_pages_as_heap) {}
    431 
    432    else if VG_BINT_CLO(arg, "--depth",          clo_depth, 1, MAX_DEPTH) {}
    433 
    434    else if VG_STR_CLO(arg, "--alloc-fn",        tmp_str) {
    435       VG_(addToXA)(alloc_fns, &tmp_str);
    436    }
    437    else if VG_STR_CLO(arg, "--ignore-fn",       tmp_str) {
    438       VG_(addToXA)(ignore_fns, &tmp_str);
    439    }
    440 
    441    else if VG_DBL_CLO(arg, "--threshold",  clo_threshold) {
    442       if (clo_threshold < 0 || clo_threshold > 100) {
    443          VG_(fmsg_bad_option)(arg,
    444             "--threshold must be between 0.0 and 100.0\n");
    445       }
    446    }
    447 
    448    else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}
    449 
    450    else if VG_XACT_CLO(arg, "--time-unit=i",    clo_time_unit, TimeI)  {}
    451    else if VG_XACT_CLO(arg, "--time-unit=ms",   clo_time_unit, TimeMS) {}
    452    else if VG_XACT_CLO(arg, "--time-unit=B",    clo_time_unit, TimeB)  {}
    453 
    454    else if VG_BINT_CLO(arg, "--detailed-freq",  clo_detailed_freq, 1, 1000000) {}
    455 
    456    else if VG_BINT_CLO(arg, "--max-snapshots",  clo_max_snapshots, 10, 1000) {}
    457 
    458    else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}
    459 
    460    else
    461       return VG_(replacement_malloc_process_cmd_line_option)(arg);
    462 
    463    return True;
    464 }
    465 
    466 static void ms_print_usage(void)
    467 {
    468    VG_(printf)(
    469 "    --heap=no|yes             profile heap blocks [yes]\n"
    470 "    --heap-admin=<size>       average admin bytes per heap block;\n"
    471 "                               ignored if --heap=no [8]\n"
    472 "    --stacks=no|yes           profile stack(s) [no]\n"
    473 "    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
    474 "    --depth=<number>          depth of contexts [30]\n"
    475 "    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
    476 "    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
    477 "    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
    478 "    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
    479 "    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
    480 "                              or heap bytes alloc'd/dealloc'd [i]\n"
    481 "    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
    482 "    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
    483 "    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
    484    );
    485 }
    486 
    487 static void ms_print_debug_usage(void)
    488 {
    489    VG_(printf)(
    490 "    (none)\n"
    491    );
    492 }
    493 
    494 
    495 //------------------------------------------------------------//
    496 //--- XPts, XTrees and XCons                               ---//
    497 //------------------------------------------------------------//
    498 
    499 // An XPt represents an "execution point", ie. a code address.  Each XPt is
    500 // part of a tree of XPts (an "execution tree", or "XTree").  The details of
    501 // the heap are represented by a single XTree.
    502 //
    503 // The root of the tree is 'alloc_xpt', which represents all allocation
    504 // functions, eg:
    505 // - malloc/calloc/realloc/memalign/new/new[];
    506 // - user-specified allocation functions (using --alloc-fn);
    507 // - custom allocation (MALLOCLIKE) points
    508 // It's a bit of a fake XPt (ie. its 'ip' is zero), and is only used because
    509 // it makes the code simpler.
    510 //
    511 // Any child of 'alloc_xpt' is called a "top-XPt".  The XPts at the bottom
    512 // of an XTree (leaf nodes) are "bottom-XPTs".
    513 //
    514 // Each path from a top-XPt to a bottom-XPt through an XTree gives an
    515 // execution context ("XCon"), ie. a stack trace.  (And sub-paths represent
    516 // stack sub-traces.)  The number of XCons in an XTree is equal to the
    517 // number of bottom-XPTs in that XTree.
    518 //
    519 //      alloc_xpt       XTrees are bi-directional.
    520 //        | ^
    521 //        v |
    522 //     > parent <       Example: if child1() calls parent() and child2()
    523 //    /    |     \      also calls parent(), and parent() calls malloc(),
    524 //   |    / \     |     the XTree will look like this.
    525 //   |   v   v    |
    526 //  child1   child2
    527 //
    528 // (Note that malformed stack traces can lead to difficulties.  See the
    529 // comment at the bottom of get_XCon.)
    530 //
    531 // XTrees and XPts are mirrored by SXTrees and SXPts, where the 'S' is short
    532 // for "saved".  When the XTree is duplicated for a snapshot, we duplicate
    533 // it as an SXTree, which is similar but omits some things it does not need,
    534 // and aggregates up insignificant nodes.  This is important as an SXTree is
    535 // typically much smaller than an XTree.
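         //
         // Eg. for the child1/child2 diagram above, there are two XCons:
         //
         //    parent/child1   and   parent/child2
         //
         // where 'parent' is the top-XPt (the first frame kept below the
         // allocation function) and child1/child2 are bottom-XPts.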
    536 
    537 // XXX: make XPt and SXPt extensible arrays, to avoid having to do two
    538 // allocations per Pt.
    539 
    540 typedef struct _XPt XPt;
    541 struct _XPt {
    542    Addr  ip;              // code address
    543 
    544    // Bottom-XPts: space for the precise context.
    545    // Other XPts:  space of all the descendent bottom-XPts.
    546    // Nb: this value goes up and down as the program executes.
    547    SizeT szB;
    548 
    549    XPt*  parent;           // pointer to parent XPt
    550 
    551    // Children.
    552    // n_children and max_children are 32-bit integers.  16-bit integers
    553    // are too small -- a very big program might have more than 65536
    554    // allocation points (ie. top-XPts) -- Konqueror starting up has 1800.
    555    UInt  n_children;       // number of children
    556    UInt  max_children;     // capacity of children array
    557    XPt** children;         // pointers to children XPts
    558 };
    559 
    560 typedef
    561    enum {
    562       SigSXPt,
    563       InsigSXPt
    564    }
    565    SXPtTag;
    566 
    567 typedef struct _SXPt SXPt;
    568 struct _SXPt {
    569    SXPtTag tag;
    570    SizeT szB;              // memory size for the node, be it Sig or Insig
    571    union {
    572       // An SXPt representing a single significant code location.  Much like
    573       // an XPt, minus the fields that aren't necessary.
    574       struct {
    575          Addr   ip;
    576          UInt   n_children;
    577          SXPt** children;
    578       }
    579       Sig;
    580 
    581       // An SXPt representing one or more code locations, all below the
    582       // significance threshold.
    583       struct {
    584          Int   n_xpts;     // number of aggregated XPts
    585       }
    586       Insig;
    587    };
    588 };
    589 
    590 // Fake XPt representing all allocation functions like malloc().  Acts as
    591 // parent node to all top-XPts.
    592 static XPt* alloc_xpt;
    593 
    594 // Cheap allocation for blocks that never need to be freed.  Saves about 10%
    595 // for Konqueror startup with --depth=40.
    596 static void* perm_malloc(SizeT n_bytes)
    597 {
    598    static Addr hp     = 0;    // current heap pointer
    599    static Addr hp_lim = 0;    // maximum usable byte in current block
    600 
    601    #define SUPERBLOCK_SIZE  (1 << 20)         // 1 MB
    602 
    603    if (hp + n_bytes > hp_lim) {
    604       hp = (Addr)VG_(am_shadow_alloc)(SUPERBLOCK_SIZE);
    605       if (0 == hp)
    606          VG_(out_of_memory_NORETURN)( "massif:perm_malloc",
    607                                       SUPERBLOCK_SIZE);
    608       hp_lim = hp + SUPERBLOCK_SIZE - 1;
    609    }
    610 
    611    hp += n_bytes;
    612 
    613    return (void*)(hp - n_bytes);
    614 }
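         // Nb: perm_malloc does no alignment rounding of its own, so it relies
         // on callers requesting nicely-sized blocks.  Its only caller here is
         // new_XPt, and sizeof(XPt) is a multiple of the word size.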
    615 
    616 static XPt* new_XPt(Addr ip, XPt* parent)
    617 {
    618    // XPts are never freed, so we can use perm_malloc to allocate them.
    619    // Note that we cannot use perm_malloc for the 'children' array, because
    620    // that needs to be resizable.
    621    XPt* xpt    = perm_malloc(sizeof(XPt));
    622    xpt->ip     = ip;
    623    xpt->szB    = 0;
    624    xpt->parent = parent;
    625 
    626    // We don't initially allocate any space for children.  We let that
    627    // happen on demand.  Many XPts (ie. all the bottom-XPts) don't have any
    628    // children anyway.
    629    xpt->n_children   = 0;
    630    xpt->max_children = 0;
    631    xpt->children     = NULL;
    632 
    633    // Update statistics
    634    n_xpts++;
    635 
    636    return xpt;
    637 }
    638 
    639 static void add_child_xpt(XPt* parent, XPt* child)
    640 {
    641    // Expand 'children' if necessary.
    642    tl_assert(parent->n_children <= parent->max_children);
    643    if (parent->n_children == parent->max_children) {
    644       if (0 == parent->max_children) {
    645          parent->max_children = 4;
    646          parent->children = VG_(malloc)( "ms.main.acx.1",
    647                                          parent->max_children * sizeof(XPt*) );
    648          n_xpt_init_expansions++;
    649       } else {
    650          parent->max_children *= 2;    // Double size
    651          parent->children = VG_(realloc)( "ms.main.acx.2",
    652                                           parent->children,
    653                                           parent->max_children * sizeof(XPt*) );
    654          n_xpt_later_expansions++;
    655       }
    656    }
    657 
    658    // Insert new child XPt in parent's children list.
    659    parent->children[ parent->n_children++ ] = child;
    660 }
    661 
    662 // Reverse comparison for a reverse sort -- biggest to smallest.
    663 static Int SXPt_revcmp_szB(void* n1, void* n2)
    664 {
    665    SXPt* sxpt1 = *(SXPt**)n1;
    666    SXPt* sxpt2 = *(SXPt**)n2;
    667    return ( sxpt1->szB < sxpt2->szB ?  1
    668           : sxpt1->szB > sxpt2->szB ? -1
    669           :                            0);
    670 }
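         // (SXPt_revcmp_szB has the signature expected by VG_(ssort).  When a
         // detailed snapshot is printed -- the printing code is not shown in
         // this excerpt -- each SXPt's children are sorted with something like:
         //
         //    VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children,
         //               sizeof(SXPt*), SXPt_revcmp_szB);
         //
         // so the biggest children come first.)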
    671 
    672 //------------------------------------------------------------//
    673 //--- XTree Operations                                     ---//
    674 //------------------------------------------------------------//
    675 
    676 // Duplicates an XTree as an SXTree.
    677 static SXPt* dup_XTree(XPt* xpt, SizeT total_szB)
    678 {
    679    Int  i, n_sig_children, n_insig_children, n_child_sxpts;
    680    SizeT sig_child_threshold_szB;
    681    SXPt* sxpt;
    682 
    683    // Number of XPt children  Action for SXPT
    684    // ------------------      ---------------
    685    // 0 sig, 0 insig          alloc 0 children
    686    // N sig, 0 insig          alloc N children, dup all
    687    // N sig, M insig          alloc N+1, dup first N, aggregate remaining M
    688    // 0 sig, M insig          alloc 1, aggregate M
    689 
    690    // Work out how big a child must be to be significant.  If the current
    691    // total_szB is zero, then we set it to 1, which means everything will be
    692    // judged insignificant -- this is sensible, as there's no point showing
    693    // any detail for this case.  Unless they used --threshold=0, in which
    694    // case we show them everything because that's what they asked for.
    695    //
    696    // Nb: We do this once now, rather than once per child, because if we do
    697    // that the cost of all the divisions adds up to something significant.
    698    if (0 == total_szB && 0 != clo_threshold) {
    699       sig_child_threshold_szB = 1;
    700    } else {
    701       sig_child_threshold_szB = (SizeT)((total_szB * clo_threshold) / 100);
    702    }
    703 
    704    // How many children are significant?  And do we need an aggregate SXPt?
    705    n_sig_children = 0;
    706    for (i = 0; i < xpt->n_children; i++) {
    707       if (xpt->children[i]->szB >= sig_child_threshold_szB) {
    708          n_sig_children++;
    709       }
    710    }
    711    n_insig_children = xpt->n_children - n_sig_children;
    712    n_child_sxpts = n_sig_children + ( n_insig_children > 0 ? 1 : 0 );
    713 
    714    // Duplicate the XPt.
    715    sxpt                 = VG_(malloc)("ms.main.dX.1", sizeof(SXPt));
    716    n_sxpt_allocs++;
    717    sxpt->tag            = SigSXPt;
    718    sxpt->szB            = xpt->szB;
    719    sxpt->Sig.ip         = xpt->ip;
    720    sxpt->Sig.n_children = n_child_sxpts;
    721 
    722    // Create the SXPt's children.
    723    if (n_child_sxpts > 0) {
    724       Int j;
    725       SizeT sig_children_szB = 0, insig_children_szB = 0;
    726       sxpt->Sig.children = VG_(malloc)("ms.main.dX.2",
    727                                        n_child_sxpts * sizeof(SXPt*));
    728 
    729       // Duplicate the significant children.  (Nb: sig_children_szB +
    730       // insig_children_szB doesn't necessarily equal xpt->szB.)
    731       j = 0;
    732       for (i = 0; i < xpt->n_children; i++) {
    733          if (xpt->children[i]->szB >= sig_child_threshold_szB) {
    734             sxpt->Sig.children[j++] = dup_XTree(xpt->children[i], total_szB);
    735             sig_children_szB   += xpt->children[i]->szB;
    736          } else {
    737             insig_children_szB += xpt->children[i]->szB;
    738          }
    739       }
    740 
    741       // Create the SXPt for the insignificant children, if any, and put it
    742       // in the last child entry.
    743       if (n_insig_children > 0) {
     744          // Nb: We increment 'n_sxpt_allocs' here because creating an Insig
     745          // SXPt doesn't involve a call to dup_XTree().
    746          SXPt* insig_sxpt = VG_(malloc)("ms.main.dX.3", sizeof(SXPt));
    747          n_sxpt_allocs++;
    748          insig_sxpt->tag = InsigSXPt;
    749          insig_sxpt->szB = insig_children_szB;
    750          insig_sxpt->Insig.n_xpts = n_insig_children;
    751          sxpt->Sig.children[n_sig_children] = insig_sxpt;
    752       }
    753    } else {
    754       sxpt->Sig.children = NULL;
    755    }
    756 
    757    return sxpt;
    758 }
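         // A worked example, with invented numbers: with total_szB=100 and
         // --threshold=10, sig_child_threshold_szB is 10.  An XPt whose children
         // have sizes 50, 30, 12, 5 and 3 becomes a SigSXPt with four children:
         // Sig SXPts for the 50, 30 and 12, plus one InsigSXPt aggregating the
         // other two (szB = 5+3 = 8, n_xpts = 2).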
    759 
    760 static void free_SXTree(SXPt* sxpt)
    761 {
    762    Int  i;
    763    tl_assert(sxpt != NULL);
    764 
    765    switch (sxpt->tag) {
    766     case SigSXPt:
    767       // Free all children SXPts, then the children array.
    768       for (i = 0; i < sxpt->Sig.n_children; i++) {
    769          free_SXTree(sxpt->Sig.children[i]);
    770          sxpt->Sig.children[i] = NULL;
    771       }
    772       VG_(free)(sxpt->Sig.children);  sxpt->Sig.children = NULL;
    773       break;
    774 
    775     case InsigSXPt:
    776       break;
    777 
    778     default: tl_assert2(0, "free_SXTree: unknown SXPt tag");
    779    }
    780 
    781    // Free the SXPt itself.
    782    VG_(free)(sxpt);     sxpt = NULL;
    783    n_sxpt_frees++;
    784 }
    785 
    786 // Sanity checking:  we periodically check the heap XTree with
    787 // ms_expensive_sanity_check.
    788 static void sanity_check_XTree(XPt* xpt, XPt* parent)
    789 {
    790    tl_assert(xpt != NULL);
    791 
    792    // Check back-pointer.
    793    tl_assert2(xpt->parent == parent,
    794       "xpt->parent = %p, parent = %p\n", xpt->parent, parent);
    795 
    796    // Check children counts look sane.
    797    tl_assert(xpt->n_children <= xpt->max_children);
    798 
    799    // Unfortunately, xpt's size is not necessarily equal to the sum of xpt's
    800    // children's sizes.  See comment at the bottom of get_XCon.
    801 }
    802 
    803 // Sanity checking:  we check SXTrees (which are in snapshots) after
    804 // snapshots are created, before they are deleted, and before they are
    805 // printed.
    806 static void sanity_check_SXTree(SXPt* sxpt)
    807 {
    808    Int i;
    809 
    810    tl_assert(sxpt != NULL);
    811 
    812    // Check the sum of any children szBs equals the SXPt's szB.  Check the
    813    // children at the same time.
    814    switch (sxpt->tag) {
    815     case SigSXPt: {
    816       if (sxpt->Sig.n_children > 0) {
    817          for (i = 0; i < sxpt->Sig.n_children; i++) {
    818             sanity_check_SXTree(sxpt->Sig.children[i]);
    819          }
    820       }
    821       break;
    822     }
    823     case InsigSXPt:
    824       break;         // do nothing
    825 
    826     default: tl_assert2(0, "sanity_check_SXTree: unknown SXPt tag");
    827    }
    828 }
    829 
    830 
    831 //------------------------------------------------------------//
    832 //--- XCon Operations                                      ---//
    833 //------------------------------------------------------------//
    834 
    835 // This is the limit on the number of removed alloc-fns that can be in a
    836 // single XCon.
    837 #define MAX_OVERESTIMATE   50
    838 #define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)
    839 
    840 // This is used for various buffers which can hold function names/IP
    841 // description.  Some C++ names can get really long so 1024 isn't big
    842 // enough.
    843 #define BUF_LEN   2048
    844 
    845 // Determine if the given IP belongs to a function that should be ignored.
    846 static Bool fn_should_be_ignored(Addr ip)
    847 {
    848    static Char buf[BUF_LEN];
    849    return
    850       ( VG_(get_fnname)(ip, buf, BUF_LEN) && is_member_fn(ignore_fns, buf)
    851       ? True : False );
    852 }
    853 
    854 // Get the stack trace for an XCon, filtering out uninteresting entries:
    855 // alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
    856 //   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
    857 //   becomes:  a / b / main
    858 // Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
    859 // as an alloc-fn.  This is ok.
    860 static
    861 Int get_IPs( ThreadId tid, Bool exclude_first_entry, Addr ips[])
    862 {
    863    static Char buf[BUF_LEN];
    864    Int n_ips, i, n_alloc_fns_removed;
    865    Int overestimate;
    866    Bool redo;
    867 
    868    // We ask for a few more IPs than clo_depth suggests we need.  Then we
    869    // remove every entry that is an alloc-fn.  Depending on the
    870    // circumstances, we may need to redo it all, asking for more IPs.
    871    // Details:
    872    // - If the original stack trace is smaller than asked-for, redo=False
    873    // - Else if after filtering we have >= clo_depth IPs,      redo=False
    874    // - Else redo=True
    875    // In other words, to redo, we'd have to get a stack trace as big as we
    876    // asked for and remove more than 'overestimate' alloc-fns.
    877 
    878    // Main loop.
    879    redo = True;      // Assume this to begin with.
    880    for (overestimate = 3; redo; overestimate += 6) {
    881       // This should never happen -- would require MAX_OVERESTIMATE
    882       // alloc-fns to be removed from the stack trace.
    883       if (overestimate > MAX_OVERESTIMATE)
    884          VG_(tool_panic)("get_IPs: ips[] too small, inc. MAX_OVERESTIMATE?");
    885 
    886       // Ask for more IPs than clo_depth suggests we need.
    887       n_ips = VG_(get_StackTrace)( tid, ips, clo_depth + overestimate,
    888                                    NULL/*array to dump SP values in*/,
    889                                    NULL/*array to dump FP values in*/,
    890                                    0/*first_ip_delta*/ );
    891       tl_assert(n_ips > 0);
    892 
    893       // If the original stack trace is smaller than asked-for, redo=False.
    894       if (n_ips < clo_depth + overestimate) { redo = False; }
    895 
    896       // Filter out alloc fns.  If requested, we automatically remove the
    897       // first entry (which presumably will be something like malloc or
    898       // __builtin_new that we're sure to filter out) without looking at it,
    899       // because VG_(get_fnname) is expensive.
    900       n_alloc_fns_removed = ( exclude_first_entry ? 1 : 0 );
    901       for (i = n_alloc_fns_removed; i < n_ips; i++) {
    902          if (VG_(get_fnname)(ips[i], buf, BUF_LEN)) {
    903             if (is_member_fn(alloc_fns, buf)) {
    904                n_alloc_fns_removed++;
    905             } else {
    906                break;
    907             }
    908          }
    909       }
    910       // Remove the alloc fns by shuffling the rest down over them.
    911       n_ips -= n_alloc_fns_removed;
    912       for (i = 0; i < n_ips; i++) {
    913          ips[i] = ips[i + n_alloc_fns_removed];
    914       }
    915 
    916       // If after filtering we have >= clo_depth IPs, redo=False
    917       if (n_ips >= clo_depth) {
    918          redo = False;
    919          n_ips = clo_depth;      // Ignore any IPs below --depth.
    920       }
    921 
    922       if (redo) {
    923          n_XCon_redos++;
    924       }
    925    }
    926    return n_ips;
    927 }
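         // A worked example, with invented numbers: with --depth=30, the first
         // pass asks for 30+3 IPs.  If all 33 come back and 5 of them get
         // filtered out as alloc-fns, 28 remain -- fewer than clo_depth -- so
         // redo=True, and the second pass asks for 30+9 IPs, which suffices.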
    928 
    929 // Gets an XCon and puts it in the tree.  Returns the XCon's bottom-XPt.
    930 // Unless the allocation should be ignored, in which case we return NULL.
    931 static XPt* get_XCon( ThreadId tid, Bool exclude_first_entry )
    932 {
    933    static Addr ips[MAX_IPS];
    934    Int i;
    935    XPt* xpt = alloc_xpt;
    936 
    937    // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
    938    Int n_ips = get_IPs(tid, exclude_first_entry, ips);
    939 
    940    // Should we ignore this allocation?  (Nb: n_ips can be zero, eg. if
    941    // 'main' is marked as an alloc-fn.)
    942    if (n_ips > 0 && fn_should_be_ignored(ips[0])) {
    943       return NULL;
    944    }
    945 
    946    // Now do the search/insertion of the XCon.
    947    for (i = 0; i < n_ips; i++) {
    948       Addr ip = ips[i];
    949       Int ch;
    950       // Look for IP in xpt's children.
     951       // Linear search, ugh -- about 10% of time for konqueror startup.  We
     952       // tried caching the last result, but it was only hit about 4% of the
               // time for konqueror, so it wasn't worth keeping.
    953       // Nb:  this search hits about 98% of the time for konqueror
    954       for (ch = 0; True; ch++) {
    955          if (ch == xpt->n_children) {
    956             // IP not found in the children.
    957             // Create and add new child XPt, then stop.
    958             XPt* new_child_xpt = new_XPt(ip, xpt);
    959             add_child_xpt(xpt, new_child_xpt);
    960             xpt = new_child_xpt;
    961             break;
    962 
    963          } else if (ip == xpt->children[ch]->ip) {
    964             // Found the IP in the children, stop.
    965             xpt = xpt->children[ch];
    966             break;
    967          }
    968       }
    969    }
    970 
    971    // [Note: several comments refer to this comment.  Do not delete it
    972    // without updating them.]
    973    //
    974    // A complication... If all stack traces were well-formed, then the
    975    // returned xpt would always be a bottom-XPt.  As a consequence, an XPt's
    976    // size would always be equal to the sum of its children's sizes, which
    977    // is an excellent sanity check.
    978    //
    979    // Unfortunately, stack traces occasionally are malformed, ie. truncated.
    980    // This allows a stack trace to be a sub-trace of another, eg. a/b/c is a
    981    // sub-trace of a/b/c/d.  So we can't assume this xpt is a bottom-XPt;
    982    // nor can we do sanity check an XPt's size against its children's sizes.
    983    // This is annoying, but must be dealt with.  (Older versions of Massif
    984    // had this assertion in, and it was reported to fail by real users a
    985    // couple of times.)  Even more annoyingly, I can't come up with a simple
     986    // test case that exhibits such a malformed stack trace, so I can't
    987    // regression test it.  Sigh.
    988    //
    989    // However, we can print a warning, so that if it happens (unexpectedly)
    990    // in existing regression tests we'll know.  Also, it warns users that
    991    // the output snapshots may not add up the way they might expect.
    992    //
    993    //tl_assert(0 == xpt->n_children); // Must be bottom-XPt
    994    if (0 != xpt->n_children) {
    995       static Int n_moans = 0;
    996       if (n_moans < 3) {
    997          VG_(umsg)(
    998             "Warning: Malformed stack trace detected.  In Massif's output,\n");
    999          VG_(umsg)(
   1000             "         the size of an entry's child entries may not sum up\n");
   1001          VG_(umsg)(
   1002             "         to the entry's size as they normally do.\n");
   1003          n_moans++;
   1004          if (3 == n_moans)
   1005             VG_(umsg)(
   1006             "         (And Massif now won't warn about this again.)\n");
   1007       }
   1008    }
   1009    return xpt;
   1010 }
   1011 
   1012 // Update 'szB' of every XPt in the XCon, by percolating upwards.
   1013 static void update_XCon(XPt* xpt, SSizeT space_delta)
   1014 {
   1015    tl_assert(clo_heap);
   1016    tl_assert(NULL != xpt);
   1017 
   1018    if (0 == space_delta)
   1019       return;
   1020 
   1021    while (xpt != alloc_xpt) {
   1022       if (space_delta < 0) tl_assert(xpt->szB >= -space_delta);
   1023       xpt->szB += space_delta;
   1024       xpt = xpt->parent;
   1025    }
   1026    if (space_delta < 0) tl_assert(alloc_xpt->szB >= -space_delta);
   1027    alloc_xpt->szB += space_delta;
   1028 }
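         // Eg. after a 100-byte allocation whose XCon is parent/child1 (as in
         // the diagram near the top of this section), calling update_XCon on
         // child1's XPt with space_delta=100 adds 100 to child1's szB, then
         // parent's, then alloc_xpt's.  Freeing that block later percolates
         // -100 up the same path.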
   1029 
   1030 
   1031 //------------------------------------------------------------//
   1032 //--- Snapshots                                            ---//
   1033 //------------------------------------------------------------//
   1034 
   1035 // Snapshots are done in a way so that we always have a reasonable number of
   1036 // them.  We start by taking them quickly.  Once we hit our limit, we cull
   1037 // some (eg. half), and start taking them more slowly.  Once we hit the
   1038 // limit again, we again cull and then take them even more slowly, and so
   1039 // on.
   1040 
   1041 // Time is measured either in i or ms or bytes, depending on the --time-unit
    1042 // option.  It's a Long because it can exceed 32 bits reasonably easily, and
   1043 // because we need to allow negative values to represent unset times.
   1044 typedef Long Time;
   1045 
   1046 #define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.
   1047 
   1048 typedef
   1049    enum {
   1050       Normal = 77,
   1051       Peak,
   1052       Unused
   1053    }
   1054    SnapshotKind;
   1055 
   1056 typedef
   1057    struct {
   1058       SnapshotKind kind;
   1059       Time  time;
   1060       SizeT heap_szB;
   1061       SizeT heap_extra_szB;// Heap slop + admin bytes.
   1062       SizeT stacks_szB;
   1063       SXPt* alloc_sxpt;    // Heap XTree root, if a detailed snapshot,
   1064    }                       // otherwise NULL.
   1065    Snapshot;
   1066 
   1067 static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
   1068 static Snapshot* snapshots;            // Array of snapshots.
   1069 
   1070 static Bool is_snapshot_in_use(Snapshot* snapshot)
   1071 {
   1072    if (Unused == snapshot->kind) {
   1073       // If snapshot is unused, check all the fields are unset.
   1074       tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
   1075       tl_assert(snapshot->heap_extra_szB == 0);
   1076       tl_assert(snapshot->heap_szB       == 0);
   1077       tl_assert(snapshot->stacks_szB     == 0);
   1078       tl_assert(snapshot->alloc_sxpt     == NULL);
   1079       return False;
   1080    } else {
   1081       tl_assert(snapshot->time           != UNUSED_SNAPSHOT_TIME);
   1082       return True;
   1083    }
   1084 }
   1085 
   1086 static Bool is_detailed_snapshot(Snapshot* snapshot)
   1087 {
   1088    return (snapshot->alloc_sxpt ? True : False);
   1089 }
   1090 
   1091 static Bool is_uncullable_snapshot(Snapshot* snapshot)
   1092 {
   1093    return &snapshots[0] == snapshot                   // First snapshot
   1094        || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
   1095        || snapshot->kind == Peak;                     // Peak snapshot
   1096 }
   1097 
   1098 static void sanity_check_snapshot(Snapshot* snapshot)
   1099 {
   1100    if (snapshot->alloc_sxpt) {
   1101       sanity_check_SXTree(snapshot->alloc_sxpt);
   1102    }
   1103 }
   1104 
   1105 // All the used entries should look used, all the unused ones should be clear.
   1106 static void sanity_check_snapshots_array(void)
   1107 {
   1108    Int i;
   1109    for (i = 0; i < next_snapshot_i; i++) {
   1110       tl_assert( is_snapshot_in_use( & snapshots[i] ));
   1111    }
   1112    for (    ; i < clo_max_snapshots; i++) {
   1113       tl_assert(!is_snapshot_in_use( & snapshots[i] ));
   1114    }
   1115 }
   1116 
   1117 // This zeroes all the fields in the snapshot, but does not free the heap
   1118 // XTree if present.  It also does a sanity check unless asked not to;  we
   1119 // can't sanity check at startup when clearing the initial snapshots because
   1120 // they're full of junk.
   1121 static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
   1122 {
   1123    if (do_sanity_check) sanity_check_snapshot(snapshot);
   1124    snapshot->kind           = Unused;
   1125    snapshot->time           = UNUSED_SNAPSHOT_TIME;
   1126    snapshot->heap_extra_szB = 0;
   1127    snapshot->heap_szB       = 0;
   1128    snapshot->stacks_szB     = 0;
   1129    snapshot->alloc_sxpt     = NULL;
   1130 }
   1131 
   1132 // This zeroes all the fields in the snapshot, and frees the heap XTree if
   1133 // present.
   1134 static void delete_snapshot(Snapshot* snapshot)
   1135 {
   1136    // Nb: if there's an XTree, we free it after calling clear_snapshot,
   1137    // because clear_snapshot does a sanity check which includes checking the
   1138    // XTree.
   1139    SXPt* tmp_sxpt = snapshot->alloc_sxpt;
   1140    clear_snapshot(snapshot, /*do_sanity_check*/True);
   1141    if (tmp_sxpt) {
   1142       free_SXTree(tmp_sxpt);
   1143    }
   1144 }
   1145 
   1146 static void VERB_snapshot(Int verbosity, Char* prefix, Int i)
   1147 {
   1148    Snapshot* snapshot = &snapshots[i];
   1149    Char* suffix;
   1150    switch (snapshot->kind) {
   1151    case Peak:   suffix = "p";                                            break;
   1152    case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
   1153    case Unused: suffix = "u";                                            break;
   1154    default:
   1155       tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
   1156    }
   1157    VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
   1158       prefix, suffix, i,
   1159       snapshot->time,
   1160       snapshot->heap_szB,
   1161       snapshot->heap_extra_szB,
   1162       snapshot->stacks_szB
   1163    );
   1164 }
   1165 
   1166 // Cull half the snapshots;  we choose those that represent the smallest
   1167 // time-spans, because that gives us the most even distribution of snapshots
   1168 // over time.  (It's possible to lose interesting spikes, however.)
   1169 //
   1170 // Algorithm for N snapshots:  We find the snapshot representing the smallest
   1171 // timeframe, and remove it.  We repeat this until (N/2) snapshots are gone.
   1172 // We have to do this one snapshot at a time, rather than finding the (N/2)
   1173 // smallest snapshots in one hit, because when a snapshot is removed, its
   1174 // neighbours immediately cover greater timespans.  So it's O(N^2), but N is
   1175 // small, and it's not done very often.
   1176 //
   1177 // Once we're done, we return the new smallest interval between snapshots.
   1178 // That becomes our minimum time interval.
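         // Eg. (invented numbers) culling two snapshots from times [0,1,2,3,20]:
         // the candidate timespans are d(0,2)=2 for t=1, d(1,3)=2 for t=2 and
         // d(2,20)=18 for t=3, so t=1 goes first; recomputing gives d(0,3)=3 for
         // t=2 and d(2,20)=18 for t=3, so t=2 goes next, leaving [0,3,20].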
   1179 static UInt cull_snapshots(void)
   1180 {
   1181    Int  i, jp, j, jn, min_timespan_i;
   1182    Int  n_deleted = 0;
   1183    Time min_timespan;
   1184 
   1185    n_cullings++;
   1186 
   1187    // Sets j to the index of the first not-yet-removed snapshot at or after i
   1188    #define FIND_SNAPSHOT(i, j) \
   1189       for (j = i; \
   1190            j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
   1191            j++) { }
   1192 
   1193    VERB(2, "Culling...\n");
   1194 
   1195    // First we remove enough snapshots by clearing them in-place.  Once
   1196    // that's done, we can slide the remaining ones down.
   1197    for (i = 0; i < clo_max_snapshots/2; i++) {
   1198       // Find the snapshot representing the smallest timespan.  The timespan
    1199       // for snapshot n is d(n-1,n) + d(n,n+1), where d(A,B) is the time between
    1200       // snapshots A and B.  We don't consider the first and last snapshots for
   1201       // removal.
   1202       Snapshot* min_snapshot;
   1203       Int min_j;
   1204 
   1205       // Initial triple: (prev, curr, next) == (jp, j, jn)
   1206       // Initial min_timespan is the first one.
   1207       jp = 0;
   1208       FIND_SNAPSHOT(1,   j);
   1209       FIND_SNAPSHOT(j+1, jn);
   1210       min_timespan = 0x7fffffffffffffffLL;
   1211       min_j        = -1;
   1212       while (jn < clo_max_snapshots) {
   1213          Time timespan = snapshots[jn].time - snapshots[jp].time;
   1214          tl_assert(timespan >= 0);
   1215          // Nb: We never cull the peak snapshot.
   1216          if (Peak != snapshots[j].kind && timespan < min_timespan) {
   1217             min_timespan = timespan;
   1218             min_j        = j;
   1219          }
   1220          // Move on to next triple
   1221          jp = j;
   1222          j  = jn;
   1223          FIND_SNAPSHOT(jn+1, jn);
   1224       }
   1225       // We've found the least important snapshot, now delete it.  First
   1226       // print it if necessary.
   1227       tl_assert(-1 != min_j);    // Check we found a minimum.
   1228       min_snapshot = & snapshots[ min_j ];
   1229       if (VG_(clo_verbosity) > 1) {
   1230          Char buf[64];
   1231          VG_(snprintf)(buf, 64, " %3d (t-span = %lld)", i, min_timespan);
   1232          VERB_snapshot(2, buf, min_j);
   1233       }
   1234       delete_snapshot(min_snapshot);
   1235       n_deleted++;
   1236    }
   1237 
   1238    // Slide down the remaining snapshots over the removed ones.  First set i
   1239    // to point to the first empty slot, and j to the first full slot after
   1240    // i.  Then slide everything down.
   1241    for (i = 0;  is_snapshot_in_use( &snapshots[i] ); i++) { }
   1242    for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
   1243    for (  ; j < clo_max_snapshots; j++) {
   1244       if (is_snapshot_in_use( &snapshots[j] )) {
   1245          snapshots[i++] = snapshots[j];
   1246          clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
   1247       }
   1248    }
   1249    next_snapshot_i = i;
   1250 
   1251    // Check snapshots array looks ok after changes.
   1252    sanity_check_snapshots_array();
   1253 
   1254    // Find the minimum timespan remaining;  that will be our new minimum
   1255    // time interval.  Note that above we were finding timespans by measuring
   1256    // two intervals around a snapshot that was under consideration for
   1257    // deletion.  Here we only measure single intervals because all the
   1258    // deletions have occurred.
   1259    //
   1260    // But we have to be careful -- some snapshots (eg. snapshot 0, and the
   1261    // peak snapshot) are uncullable.  If two uncullable snapshots end up
   1262    // next to each other, they'll never be culled (assuming the peak doesn't
   1263    // change), and the time gap between them will not change.  However, the
   1264    // time between the remaining cullable snapshots will grow ever larger.
   1265    // This means that the min_timespan found will always be that between the
   1266    // two uncullable snapshots, and it will be much smaller than it should
   1267    // be.  To avoid this problem, when computing the minimum timespan, we
   1268    // ignore any timespans between two uncullable snapshots.
   1269    tl_assert(next_snapshot_i > 1);
   1270    min_timespan = 0x7fffffffffffffffLL;
   1271    min_timespan_i = -1;
   1272    for (i = 1; i < next_snapshot_i; i++) {
   1273       if (is_uncullable_snapshot(&snapshots[i]) &&
   1274           is_uncullable_snapshot(&snapshots[i-1]))
   1275       {
   1276          VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
   1277       } else {
   1278          Time timespan = snapshots[i].time - snapshots[i-1].time;
   1279          tl_assert(timespan >= 0);
   1280          if (timespan < min_timespan) {
   1281             min_timespan = timespan;
   1282             min_timespan_i = i;
   1283          }
   1284       }
   1285    }
   1286    tl_assert(-1 != min_timespan_i);    // Check we found a minimum.
   1287 
   1288    // Print remaining snapshots, if necessary.
   1289    if (VG_(clo_verbosity) > 1) {
   1290       VERB(2, "Finished culling (%3d of %3d deleted)\n",
   1291          n_deleted, clo_max_snapshots);
   1292       for (i = 0; i < next_snapshot_i; i++) {
   1293          VERB_snapshot(2, "  post-cull", i);
   1294       }
   1295       VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
   1296          min_timespan, min_timespan_i-1, min_timespan_i);
   1297    }
   1298 
   1299    return min_timespan;
   1300 }
   1301 
   1302 static Time get_time(void)
   1303 {
   1304    // Get current time, in whatever time unit we're using.
   1305    if (clo_time_unit == TimeI) {
   1306       return guest_instrs_executed;
   1307    } else if (clo_time_unit == TimeMS) {
   1308       // Some stuff happens between the millisecond timer being initialised
   1309       // to zero and us taking our first snapshot.  We determine that time
   1310       // gap so we can subtract it from all subsequent times so that our
   1311       // first snapshot is considered to be at t = 0ms.  Unfortunately, a
   1312       // bunch of symbols get read after the first snapshot is taken but
   1313       // before the second one (which is triggered by the first allocation),
   1314       // so when the time-unit is 'ms' we always have a big gap between the
   1315       // first two snapshots.  But at least users won't have to wonder why
   1316       // the first snapshot isn't at t=0.
   1317       static Bool is_first_get_time = True;
   1318       static Time start_time_ms;
   1319       if (is_first_get_time) {
   1320          start_time_ms = VG_(read_millisecond_timer)();
   1321          is_first_get_time = False;
   1322          return 0;
   1323       } else {
   1324          return VG_(read_millisecond_timer)() - start_time_ms;
   1325       }
   1326    } else if (clo_time_unit == TimeB) {
   1327       return total_allocs_deallocs_szB;
   1328    } else {
   1329       tl_assert2(0, "bad --time-unit value");
   1330    }
   1331 }
   1332 
   1333 // Take a snapshot, and only that -- decisions on whether to take a
   1334 // snapshot, or what kind of snapshot, are made elsewhere.
   1335 // Nb: we call the arg "my_time" because "time" shadows a global declaration
   1336 // in /usr/include/time.h on Darwin.
   1337 static void
   1338 take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
   1339               Bool is_detailed)
   1340 {
   1341    tl_assert(!is_snapshot_in_use(snapshot));
   1342    if (!clo_pages_as_heap) {
   1343       tl_assert(have_started_executing_code);
   1344    }
   1345 
   1346    // Heap and heap admin.
   1347    if (clo_heap) {
   1348       snapshot->heap_szB = heap_szB;
   1349       if (is_detailed) {
   1350          SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
   1351          snapshot->alloc_sxpt = dup_XTree(alloc_xpt, total_szB);
   1352          tl_assert(           alloc_xpt->szB == heap_szB);
   1353          tl_assert(snapshot->alloc_sxpt->szB == heap_szB);
   1354       }
   1355       snapshot->heap_extra_szB = heap_extra_szB;
   1356    }
   1357 
   1358    // Stack(s).
   1359    if (clo_stacks) {
   1360       snapshot->stacks_szB = stacks_szB;
   1361    }
   1362 
   1363    // Rest of snapshot.
   1364    snapshot->kind = kind;
   1365    snapshot->time = my_time;
   1366    sanity_check_snapshot(snapshot);
   1367 
   1368    // Update stats.
   1369    if (Peak == kind) n_peak_snapshots++;
   1370    if (is_detailed)  n_detailed_snapshots++;
   1371    n_real_snapshots++;
   1372 }
   1373 
   1374 
   1375 // Take a snapshot, if it's time, or if we've hit a peak.
   1376 static void
   1377 maybe_take_snapshot(SnapshotKind kind, Char* what)
   1378 {
   1379    // 'min_time_interval' is the minimum time interval between snapshots.
   1380    // If we try to take a snapshot and less than this much time has passed,
   1381    // we don't take it.  It gets larger as the program runs longer.  It's
   1382    // initialised to zero so that we begin by taking snapshots as quickly as
   1383    // possible.
   1384    static Time min_time_interval = 0;
   1385    // Zero allows startup snapshot.
   1386    static Time earliest_possible_time_of_next_snapshot = 0;
   1387    static Int  n_snapshots_since_last_detailed         = 0;
   1388    static Int  n_skipped_snapshots_since_last_snapshot = 0;
   1389 
   1390    Snapshot* snapshot;
   1391    Bool      is_detailed;
   1392    // Nb: we call this variable "my_time" because "time" shadows a global
   1393    // declaration in /usr/include/time.h on Darwin.
   1394    Time      my_time = get_time();
   1395 
   1396    switch (kind) {
   1397     case Normal:
   1398       // Only do a snapshot if it's time.
   1399       if (my_time < earliest_possible_time_of_next_snapshot) {
   1400          n_skipped_snapshots++;
   1401          n_skipped_snapshots_since_last_snapshot++;
   1402          return;
   1403       }
   1404       is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
   1405       break;
   1406 
   1407     case Peak: {
   1408       // Because we're about to do a deallocation, we're coming down from a
   1409       // local peak.  If it is (a) actually a global peak, and (b) a certain
   1410       // amount bigger than the previous peak, then we take a peak snapshot.
   1411       // By not taking a snapshot for every peak, we save a lot of effort --
   1412       // because many peaks remain peak only for a short time.
   1413       SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
   1414       SizeT excess_szB_for_new_peak =
   1415          (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
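               // Eg. (illustrative numbers): with --peak-inaccuracy=1.0 and a
               // recorded peak of 1,000,000 bytes, excess_szB_for_new_peak is
               // 10,000, so the new total must exceed 1,010,000 bytes for a
               // peak snapshot to be taken.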
   1416       if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
   1417          return;
   1418       }
   1419       is_detailed = True;
   1420       break;
   1421     }
   1422 
   1423     default:
   1424       tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
   1425    }
   1426 
   1427    // Take the snapshot.
   1428    snapshot = & snapshots[next_snapshot_i];
   1429    take_snapshot(snapshot, kind, my_time, is_detailed);
   1430 
   1431    // Record if it was detailed.
   1432    if (is_detailed) {
   1433       n_snapshots_since_last_detailed = 0;
   1434    } else {
   1435       n_snapshots_since_last_detailed++;
   1436    }
   1437 
   1438    // Update peak data, if it's a Peak snapshot.
   1439    if (Peak == kind) {
   1440       Int i, number_of_peaks_snapshots_found = 0;
    1441       Int i, number_of_peak_snapshots_found = 0;
   1442       // Sanity check the size, then update our recorded peak.
   1443       SizeT snapshot_total_szB =
   1444          snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
   1445       tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
   1446          "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
   1447       peak_snapshot_total_szB = snapshot_total_szB;
   1448 
   1449       // Find the old peak snapshot, if it exists, and mark it as normal.
   1450       for (i = 0; i < next_snapshot_i; i++) {
   1451          if (Peak == snapshots[i].kind) {
   1452             snapshots[i].kind = Normal;
    1453             number_of_peak_snapshots_found++;
   1454          }
   1455       }
    1456       tl_assert(number_of_peak_snapshots_found <= 1);
   1457    }
   1458 
   1459    // Finish up verbosity and stats stuff.
   1460    if (n_skipped_snapshots_since_last_snapshot > 0) {
   1461       VERB(2, "  (skipped %d snapshot%s)\n",
   1462          n_skipped_snapshots_since_last_snapshot,
   1463          ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
   1464    }
   1465    VERB_snapshot(2, what, next_snapshot_i);
   1466    n_skipped_snapshots_since_last_snapshot = 0;
   1467 
   1468    // Cull the entries, if our snapshot table is full.
   1469    next_snapshot_i++;
   1470    if (clo_max_snapshots == next_snapshot_i) {
   1471       min_time_interval = cull_snapshots();
   1472    }
   1473 
   1474    // Work out the earliest time when the next snapshot can happen.
   1475    earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
   1476 }
   1477 
   1478 
   1479 //------------------------------------------------------------//
   1480 //--- Sanity checking                                      ---//
   1481 //------------------------------------------------------------//
   1482 
   1483 static Bool ms_cheap_sanity_check ( void )
   1484 {
   1485    return True;   // Nothing useful we can cheaply check.
   1486 }
   1487 
   1488 static Bool ms_expensive_sanity_check ( void )
   1489 {
   1490    sanity_check_XTree(alloc_xpt, /*parent*/NULL);
   1491    sanity_check_snapshots_array();
   1492    return True;
   1493 }
   1494 
   1495 
   1496 //------------------------------------------------------------//
   1497 //--- Heap management                                      ---//
   1498 //------------------------------------------------------------//
   1499 
   1500 // Metadata for heap blocks.  Each one contains a pointer to a bottom-XPt,
   1501 // which is a foothold into the XCon at which it was allocated.  From
   1502 // HP_Chunks, XPt 'space' fields are incremented (at allocation) and
   1503 // decremented (at deallocation).
   1504 //
   1505 // Nb: first two fields must match core's VgHashNode.
   1506 typedef
   1507    struct _HP_Chunk {
   1508       struct _HP_Chunk* next;
   1509       Addr              data;       // Ptr to actual block
   1510       SizeT             req_szB;    // Size requested
   1511       SizeT             slop_szB;   // Extra bytes given above those requested
   1512       XPt*              where;      // Where allocated; bottom-XPt
   1513    }
   1514    HP_Chunk;
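
         // Eg. (illustrative): a malloc(10) that the allocator rounds up to a
         // 16-byte block gives req_szB == 10 and slop_szB == 6, with 'data'
         // pointing at the block and 'where' at its bottom-XPt.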
   1515 
   1516 static VgHashTable malloc_list  = NULL;   // HP_Chunks
   1517 
   1518 static void update_alloc_stats(SSizeT szB_delta)
   1519 {
   1520    // Update total_allocs_deallocs_szB.
   1521    if (szB_delta < 0) szB_delta = -szB_delta;
   1522    total_allocs_deallocs_szB += szB_delta;
   1523 }
   1524 
   1525 static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
   1526 {
   1527    if (heap_szB_delta < 0)
   1528       tl_assert(heap_szB >= -heap_szB_delta);
   1529    if (heap_extra_szB_delta < 0)
   1530       tl_assert(heap_extra_szB >= -heap_extra_szB_delta);
   1531 
   1532    heap_extra_szB += heap_extra_szB_delta;
   1533    heap_szB       += heap_szB_delta;
   1534 
   1535    update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
   1536 }
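
         // Eg. (illustrative): with --heap-admin=8, a 100-byte allocation with
         // 4 bytes of slop calls update_heap_stats(100, 8+4):  heap_szB grows
         // by 100, heap_extra_szB by 12, and total_allocs_deallocs_szB by 112.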
   1537 
   1538 static
   1539 void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
   1540                     Bool exclude_first_entry, Bool maybe_snapshot )
   1541 {
   1542    // Make new HP_Chunk node, add to malloc_list
   1543    HP_Chunk* hc = VG_(malloc)("ms.main.rb.1", sizeof(HP_Chunk));
   1544    hc->req_szB  = req_szB;
   1545    hc->slop_szB = slop_szB;
   1546    hc->data     = (Addr)p;
   1547    hc->where    = NULL;
   1548    VG_(HT_add_node)(malloc_list, hc);
   1549 
   1550    if (clo_heap) {
   1551       VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);
   1552 
   1553       hc->where = get_XCon( tid, exclude_first_entry );
   1554 
   1555       if (hc->where) {
   1556          // Update statistics.
   1557          n_heap_allocs++;
   1558 
   1559          // Update heap stats.
   1560          update_heap_stats(req_szB, clo_heap_admin + slop_szB);
   1561 
   1562          // Update XTree.
   1563          update_XCon(hc->where, req_szB);
   1564 
   1565          // Maybe take a snapshot.
   1566          if (maybe_snapshot) {
   1567             maybe_take_snapshot(Normal, "  alloc");
   1568          }
   1569 
   1570       } else {
   1571          // Ignored allocation.
   1572          n_ignored_heap_allocs++;
   1573 
   1574          VERB(3, "(ignored)\n");
   1575       }
   1576 
   1577       VERB(3, ">>>\n");
   1578    }
   1579 
   1580    return p;
   1581 }
   1582 
   1583 static __inline__
   1584 void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB,
   1585                                Bool is_zeroed )
   1586 {
   1587    SizeT actual_szB, slop_szB;
   1588    void* p;
   1589 
   1590    if ((SSizeT)req_szB < 0) return NULL;
   1591 
   1592    // Allocate and zero if necessary.
   1593    p = VG_(cli_malloc)( req_alignB, req_szB );
   1594    if (!p) {
   1595       return NULL;
   1596    }
   1597    if (is_zeroed) VG_(memset)(p, 0, req_szB);
   1598    actual_szB = VG_(malloc_usable_size)(p);
   1599    tl_assert(actual_szB >= req_szB);
   1600    slop_szB = actual_szB - req_szB;
   1601 
   1602    // Record block.
   1603    record_block(tid, p, req_szB, slop_szB, /*exclude_first_entry*/True,
   1604                 /*maybe_snapshot*/True);
   1605 
   1606    return p;
   1607 }
   1608 
   1609 static __inline__
   1610 void unrecord_block ( void* p, Bool maybe_snapshot )
   1611 {
   1612    // Remove HP_Chunk from malloc_list
   1613    HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
   1614    if (NULL == hc) {
   1615       return;   // must have been a bogus free()
   1616    }
   1617 
   1618    if (clo_heap) {
   1619       VERB(3, "<<< unrecord_block\n");
   1620 
   1621       if (hc->where) {
   1622          // Update statistics.
   1623          n_heap_frees++;
   1624 
   1625          // Maybe take a peak snapshot, since it's a deallocation.
   1626          if (maybe_snapshot) {
   1627             maybe_take_snapshot(Peak, "de-PEAK");
   1628          }
   1629 
   1630          // Update heap stats.
   1631          update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);
   1632 
   1633          // Update XTree.
   1634          update_XCon(hc->where, -hc->req_szB);
   1635 
   1636          // Maybe take a snapshot.
   1637          if (maybe_snapshot) {
   1638             maybe_take_snapshot(Normal, "dealloc");
   1639          }
   1640 
   1641       } else {
   1642          n_ignored_heap_frees++;
   1643 
   1644          VERB(3, "(ignored)\n");
   1645       }
   1646 
   1647       VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
   1648    }
   1649 
   1650    // Actually free the chunk, and the heap block (if necessary)
   1651    VG_(free)( hc );  hc = NULL;
   1652 }
   1653 
   1654 // Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
   1655 // ignored, but the realloc is not requested to be ignored, and we are
   1656 // shrinking the block, then we have to ignore the realloc -- otherwise we
   1657 // could end up with negative heap sizes.  This isn't a danger if we are
   1658 // growing such a block, but for consistency (it also simplifies things) we
   1659 // ignore such reallocs as well.
   1660 static __inline__
   1661 void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
   1662 {
   1663    HP_Chunk* hc;
   1664    void*     p_new;
   1665    SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   1666    XPt      *old_where, *new_where;
   1667    Bool      is_ignored = False;
   1668 
   1669    // Remove the old block
   1670    hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   1671    if (hc == NULL) {
   1672       return NULL;   // must have been a bogus realloc()
   1673    }
   1674 
   1675    old_req_szB  = hc->req_szB;
   1676    old_slop_szB = hc->slop_szB;
   1677 
   1678    tl_assert(!clo_pages_as_heap);  // Shouldn't be here if --pages-as-heap=yes.
   1679    if (clo_heap) {
   1680       VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);
   1681 
   1682       if (hc->where) {
   1683          // Update statistics.
   1684          n_heap_reallocs++;
   1685 
   1686          // Maybe take a peak snapshot, if it's (effectively) a deallocation.
   1687          if (new_req_szB < old_req_szB) {
   1688             maybe_take_snapshot(Peak, "re-PEAK");
   1689          }
   1690       } else {
   1691          // The original malloc was ignored, so we have to ignore the
   1692          // realloc as well.
   1693          is_ignored = True;
   1694       }
   1695    }
   1696 
   1697    // Actually do the allocation, if necessary.
   1698    if (new_req_szB <= old_req_szB + old_slop_szB) {
   1699       // New size is smaller or same;  block not moved.
   1700       p_new = p_old;
   1701       new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);
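               // Eg. (illustrative): shrinking a block with old_req_szB == 100
               // and old_slop_szB == 12 to new_req_szB == 40 gives
               // new_slop_szB == 72;  the block's total size (112) is unchanged.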
   1702 
   1703    } else {
   1704       // New size is bigger;  make new block, copy shared contents, free old.
   1705       p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
   1706       if (!p_new) {
   1707          // Nb: if realloc fails, NULL is returned but the old block is not
   1708          // touched.  What an awful function.
   1709          return NULL;
   1710       }
   1711       VG_(memcpy)(p_new, p_old, old_req_szB + old_slop_szB);
   1712       VG_(cli_free)(p_old);
   1713       new_actual_szB = VG_(malloc_usable_size)(p_new);
   1714       tl_assert(new_actual_szB >= new_req_szB);
   1715       new_slop_szB = new_actual_szB - new_req_szB;
   1716    }
   1717 
   1718    if (p_new) {
   1719       // Update HP_Chunk.
   1720       hc->data     = (Addr)p_new;
   1721       hc->req_szB  = new_req_szB;
   1722       hc->slop_szB = new_slop_szB;
   1723       old_where    = hc->where;
   1724       hc->where    = NULL;
   1725 
   1726       // Update XTree.
   1727       if (clo_heap) {
   1728          new_where = get_XCon( tid, /*exclude_first_entry*/True);
   1729          if (!is_ignored && new_where) {
   1730             hc->where = new_where;
   1731             update_XCon(old_where, -old_req_szB);
   1732             update_XCon(new_where,  new_req_szB);
   1733          } else {
   1734             // The realloc itself is ignored.
   1735             is_ignored = True;
   1736 
   1737             // Update statistics.
   1738             n_ignored_heap_reallocs++;
   1739          }
   1740       }
   1741    }
   1742 
   1743    // Now insert the new hc (with a possibly new 'data' field) into
   1744    // malloc_list.  If this realloc() did not increase the memory size, we
   1745    // will have removed and then re-added hc unnecessarily.  But that's ok
   1746    // because shrinking a block with realloc() is (presumably) much rarer
   1747    // than growing it, and this way simplifies the growing case.
   1748    VG_(HT_add_node)(malloc_list, hc);
   1749 
   1750    if (clo_heap) {
   1751       if (!is_ignored) {
   1752          // Update heap stats.
   1753          update_heap_stats(new_req_szB - old_req_szB,
   1754                           new_slop_szB - old_slop_szB);
   1755 
   1756          // Maybe take a snapshot.
   1757          maybe_take_snapshot(Normal, "realloc");
   1758       } else {
   1759 
   1760          VERB(3, "(ignored)\n");
   1761       }
   1762 
   1763       VERB(3, ">>> (%ld, %ld)\n",
   1764          new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);
   1765    }
   1766 
   1767    return p_new;
   1768 }
   1769 
   1770 
   1771 //------------------------------------------------------------//
   1772 //--- malloc() et al replacement wrappers                  ---//
   1773 //------------------------------------------------------------//
   1774 
   1775 static void* ms_malloc ( ThreadId tid, SizeT szB )
   1776 {
   1777    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
   1778 }
   1779 
   1780 static void* ms___builtin_new ( ThreadId tid, SizeT szB )
   1781 {
   1782    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
   1783 }
   1784 
   1785 static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
   1786 {
   1787    return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
   1788 }
   1789 
   1790 static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
   1791 {
   1792    return alloc_and_record_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
   1793 }
   1794 
   1795 static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
   1796 {
   1797    return alloc_and_record_block( tid, szB, alignB, False );
    1798    return alloc_and_record_block( tid, szB, alignB, /*is_zeroed*/False );
   1799 
   1800 static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
   1801 {
   1802    unrecord_block(p, /*maybe_snapshot*/True);
   1803    VG_(cli_free)(p);
   1804 }
   1805 
   1806 static void ms___builtin_delete ( ThreadId tid, void* p )
   1807 {
   1808    unrecord_block(p, /*maybe_snapshot*/True);
   1809    VG_(cli_free)(p);
   1810 }
   1811 
   1812 static void ms___builtin_vec_delete ( ThreadId tid, void* p )
   1813 {
   1814    unrecord_block(p, /*maybe_snapshot*/True);
   1815    VG_(cli_free)(p);
   1816 }
   1817 
   1818 static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
   1819 {
   1820    return realloc_block(tid, p_old, new_szB);
   1821 }
   1822 
   1823 static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
   1824 {
   1825    HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );
   1826 
   1827    return ( hc ? hc->req_szB + hc->slop_szB : 0 );
   1828 }
   1829 
   1830 //------------------------------------------------------------//
   1831 //--- Page handling                                        ---//
   1832 //------------------------------------------------------------//
   1833 
   1834 static
   1835 void ms_record_page_mem ( Addr a, SizeT len )
   1836 {
   1837    ThreadId tid = VG_(get_running_tid)();
   1838    Addr end;
   1839    tl_assert(VG_IS_PAGE_ALIGNED(len));
   1840    tl_assert(len >= VKI_PAGE_SIZE);
   1841    // Record the first N-1 pages as blocks, but don't do any snapshots.
   1842    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
   1843       record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
   1844                     /*exclude_first_entry*/False, /*maybe_snapshot*/False );
   1845    }
   1846    // Record the last page as a block, and maybe do a snapshot afterwards.
   1847    record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
   1848                  /*exclude_first_entry*/False, /*maybe_snapshot*/True );
   1849 }
   1850 
   1851 static
   1852 void ms_unrecord_page_mem( Addr a, SizeT len )
   1853 {
   1854    Addr end;
   1855    tl_assert(VG_IS_PAGE_ALIGNED(len));
   1856    tl_assert(len >= VKI_PAGE_SIZE);
   1857    for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
   1858       unrecord_block((void*)a, /*maybe_snapshot*/False);
   1859    }
   1860    unrecord_block((void*)a, /*maybe_snapshot*/True);
   1861 }
   1862 
   1863 //------------------------------------------------------------//
   1864 
   1865 static
   1866 void ms_new_mem_mmap ( Addr a, SizeT len,
   1867                        Bool rr, Bool ww, Bool xx, ULong di_handle )
   1868 {
   1869    tl_assert(VG_IS_PAGE_ALIGNED(len));
   1870    ms_record_page_mem(a, len);
   1871 }
   1872 
   1873 static
   1874 void ms_new_mem_startup( Addr a, SizeT len,
   1875                          Bool rr, Bool ww, Bool xx, ULong di_handle )
   1876 {
    1877    // startup maps are always page-sized, except the trampoline page is
   1878    // marked by the core as only being the size of the trampoline itself,
   1879    // which is something like 57 bytes.  Round it up to page size.
   1880    len = VG_PGROUNDUP(len);
   1881    ms_record_page_mem(a, len);
   1882 }
   1883 
   1884 static
   1885 void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
   1886 {
   1887    // brk limit is not necessarily aligned on a page boundary.
    1888    // If the new memory being brk'd requires allocating a new page, call
    1889    // ms_record_page_mem with page-aligned parameters;  otherwise just
    1890    // ignore it.
   1891    Addr old_bottom_page = VG_PGROUNDDN(a - 1);
   1892    Addr new_top_page = VG_PGROUNDDN(a + len - 1);
   1893    if (old_bottom_page != new_top_page)
   1894       ms_record_page_mem(VG_PGROUNDDN(a),
   1895                          (new_top_page - old_bottom_page));
   1896 }
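
         // Eg. (illustrative, assuming 4KB pages): for a == 0x5F00 and
         // len == 0x80, both rounded values are 0x5000, so nothing is recorded;
         // with len == 0x200 instead, new_top_page == 0x6000, so the one new
         // page at 0x5000 is recorded.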
   1897 
   1898 static
   1899 void ms_copy_mem_remap( Addr from, Addr to, SizeT len)
   1900 {
   1901    tl_assert(VG_IS_PAGE_ALIGNED(len));
   1902    ms_unrecord_page_mem(from, len);
   1903    ms_record_page_mem(to, len);
   1904 }
   1905 
   1906 static
   1907 void ms_die_mem_munmap( Addr a, SizeT len )
   1908 {
   1909    tl_assert(VG_IS_PAGE_ALIGNED(len));
   1910    ms_unrecord_page_mem(a, len);
   1911 }
   1912 
   1913 static
   1914 void ms_die_mem_brk( Addr a, SizeT len )
   1915 {
   1916    // Call ms_unrecord_page_mem only if one or more pages are de-allocated.
   1917    // See ms_new_mem_brk for more details.
   1918    Addr new_bottom_page = VG_PGROUNDDN(a - 1);
   1919    Addr old_top_page = VG_PGROUNDDN(a + len - 1);
   1920    if (old_top_page != new_bottom_page)
   1921       ms_unrecord_page_mem(VG_PGROUNDDN(a),
   1922                            (old_top_page - new_bottom_page));
   1923 
   1924 }
   1925 
   1926 //------------------------------------------------------------//
   1927 //--- Stacks                                               ---//
   1928 //------------------------------------------------------------//
   1929 
   1930 // We really want the inlining to occur...
   1931 #define INLINE    inline __attribute__((always_inline))
   1932 
   1933 static void update_stack_stats(SSizeT stack_szB_delta)
   1934 {
   1935    if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
   1936    stacks_szB += stack_szB_delta;
   1937 
   1938    update_alloc_stats(stack_szB_delta);
   1939 }
   1940 
   1941 static INLINE void new_mem_stack_2(SizeT len, Char* what)
   1942 {
   1943    if (have_started_executing_code) {
   1944       VERB(3, "<<< new_mem_stack (%ld)\n", len);
   1945       n_stack_allocs++;
   1946       update_stack_stats(len);
   1947       maybe_take_snapshot(Normal, what);
   1948       VERB(3, ">>>\n");
   1949    }
   1950 }
   1951 
   1952 static INLINE void die_mem_stack_2(SizeT len, Char* what)
   1953 {
   1954    if (have_started_executing_code) {
   1955       VERB(3, "<<< die_mem_stack (%ld)\n", -len);
   1956       n_stack_frees++;
   1957       maybe_take_snapshot(Peak,   "stkPEAK");
   1958       update_stack_stats(-len);
   1959       maybe_take_snapshot(Normal, what);
   1960       VERB(3, ">>>\n");
   1961    }
   1962 }
   1963 
   1964 static void new_mem_stack(Addr a, SizeT len)
   1965 {
   1966    new_mem_stack_2(len, "stk-new");
   1967 }
   1968 
   1969 static void die_mem_stack(Addr a, SizeT len)
   1970 {
   1971    die_mem_stack_2(len, "stk-die");
   1972 }
   1973 
   1974 static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
   1975 {
   1976    new_mem_stack_2(len, "sig-new");
   1977 }
   1978 
   1979 static void die_mem_stack_signal(Addr a, SizeT len)
   1980 {
   1981    die_mem_stack_2(len, "sig-die");
   1982 }
   1983 
   1984 
   1985 //------------------------------------------------------------//
   1986 //--- Client Requests                                      ---//
   1987 //------------------------------------------------------------//
   1988 
   1989 static void print_monitor_help ( void )
   1990 {
   1991    VG_(gdb_printf) ("\n");
   1992    VG_(gdb_printf) ("massif monitor commands:\n");
   1993    VG_(gdb_printf) ("  snapshot [<filename>]\n");
   1994    VG_(gdb_printf) ("  detailed_snapshot [<filename>]\n");
   1995    VG_(gdb_printf) ("       takes a snapshot (or a detailed snapshot)\n");
   1996    VG_(gdb_printf) ("       and saves it in <filename>\n");
   1997    VG_(gdb_printf) ("             default <filename> is massif.vgdb.out\n");
   1998    VG_(gdb_printf) ("\n");
   1999 }
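
         // Eg. (illustrative): from a gdb session attached via vgdb, the above
         // commands can be issued as:
         //
         //   (gdb) monitor snapshot /tmp/massif.vgdb.out
         //   (gdb) monitor detailed_snapshot
         //
         // The <filename> argument is optional, as the help text says.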
   2000 
   2001 
   2002 /* Forward declaration.
    2003    Returns True if the request was recognised, False otherwise. */
   2004 static Bool handle_gdb_monitor_command (ThreadId tid, Char *req);
   2005 static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
   2006 {
   2007    switch (argv[0]) {
   2008    case VG_USERREQ__MALLOCLIKE_BLOCK: {
   2009       void* p   = (void*)argv[1];
   2010       SizeT szB =        argv[2];
   2011       record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
   2012                     /*maybe_snapshot*/True );
   2013       *ret = 0;
   2014       return True;
   2015    }
   2016    case VG_USERREQ__RESIZEINPLACE_BLOCK: {
   2017       void* p        = (void*)argv[1];
   2018       SizeT newSizeB =       argv[3];
   2019 
   2020       unrecord_block(p, /*maybe_snapshot*/True);
   2021       record_block(tid, p, newSizeB, /*slop_szB*/0,
   2022                    /*exclude_first_entry*/False, /*maybe_snapshot*/True);
   2023       return True;
   2024    }
   2025    case VG_USERREQ__FREELIKE_BLOCK: {
   2026       void* p = (void*)argv[1];
   2027       unrecord_block(p, /*maybe_snapshot*/True);
   2028       *ret = 0;
   2029       return True;
   2030    }
   2031    case VG_USERREQ__GDB_MONITOR_COMMAND: {
    2032       Bool handled = handle_gdb_monitor_command (tid, (Char*)argv[1]);
    2033       if (handled)
    2034          *ret = 1;
    2035       else
    2036          *ret = 0;
    2037       return handled;
   2038    }
   2039 
   2040    default:
   2041       *ret = 0;
   2042       return False;
   2043    }
   2044 }
   2045 
   2046 //------------------------------------------------------------//
   2047 //--- Instrumentation                                      ---//
   2048 //------------------------------------------------------------//
   2049 
   2050 static void add_counter_update(IRSB* sbOut, Int n)
   2051 {
   2052    #if defined(VG_BIGENDIAN)
   2053    # define END Iend_BE
   2054    #elif defined(VG_LITTLEENDIAN)
   2055    # define END Iend_LE
   2056    #else
   2057    # error "Unknown endianness"
   2058    #endif
   2059    // Add code to increment 'guest_instrs_executed' by 'n', like this:
   2060    //   WrTmp(t1, Load64(&guest_instrs_executed))
   2061    //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
   2062    //   Store(&guest_instrs_executed, t2)
   2063    IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
   2064    IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
   2065    IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );
   2066 
   2067    IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
   2068    IRStmt* st2 =
   2069       IRStmt_WrTmp(t2,
   2070                    IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
   2071                                            IRExpr_Const(IRConst_U64(n))));
   2072    IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));
   2073 
   2074    addStmtToIRSB( sbOut, st1 );
   2075    addStmtToIRSB( sbOut, st2 );
   2076    addStmtToIRSB( sbOut, st3 );
   2077 }
   2078 
   2079 static IRSB* ms_instrument2( IRSB* sbIn )
   2080 {
   2081    Int   i, n = 0;
   2082    IRSB* sbOut;
   2083 
   2084    // We increment the instruction count in two places:
   2085    // - just before any Ist_Exit statements;
   2086    // - just before the IRSB's end.
   2087    // In the former case, we zero 'n' and then continue instrumenting.
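            // Eg. (illustrative): an SB containing 3 IMarks, then an Exit, then
            // 2 more IMarks gets a '+3' counter update before the Exit and a
            // '+2' update at the SB end.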
   2088 
   2089    sbOut = deepCopyIRSBExceptStmts(sbIn);
   2090 
   2091    for (i = 0; i < sbIn->stmts_used; i++) {
   2092       IRStmt* st = sbIn->stmts[i];
   2093 
   2094       if (!st || st->tag == Ist_NoOp) continue;
   2095 
   2096       if (st->tag == Ist_IMark) {
   2097          n++;
   2098       } else if (st->tag == Ist_Exit) {
   2099          if (n > 0) {
   2100             // Add an increment before the Exit statement, then reset 'n'.
   2101             add_counter_update(sbOut, n);
   2102             n = 0;
   2103          }
   2104       }
   2105       addStmtToIRSB( sbOut, st );
   2106    }
   2107 
   2108    if (n > 0) {
   2109       // Add an increment before the SB end.
   2110       add_counter_update(sbOut, n);
   2111    }
   2112    return sbOut;
   2113 }
   2114 
   2115 static
   2116 IRSB* ms_instrument ( VgCallbackClosure* closure,
   2117                       IRSB* sbIn,
   2118                       VexGuestLayout* layout,
   2119                       VexGuestExtents* vge,
   2120                       IRType gWordTy, IRType hWordTy )
   2121 {
   2122    if (! have_started_executing_code) {
   2123       // Do an initial sample to guarantee that we have at least one.
   2124       // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
   2125       // 'maybe_take_snapshot's internal static variables are initialised.
   2126       have_started_executing_code = True;
   2127       maybe_take_snapshot(Normal, "startup");
   2128    }
   2129 
   2130    if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
   2131    else if (clo_time_unit == TimeMS) { return sbIn; }
   2132    else if (clo_time_unit == TimeB)  { return sbIn; }
   2133    else                              { tl_assert2(0, "bad --time-unit value"); }
   2134 }
   2135 
   2136 
   2137 //------------------------------------------------------------//
   2138 //--- Writing snapshots                                    ---//
   2139 //------------------------------------------------------------//
   2140 
   2141 Char FP_buf[BUF_LEN];
   2142 
   2143 // XXX: implement f{,n}printf in m_libcprint.c eventually, and use it here.
   2144 // Then change Cachegrind to use it too.
   2145 #define FP(format, args...) ({ \
   2146    VG_(snprintf)(FP_buf, BUF_LEN, format, ##args); \
   2147    FP_buf[BUF_LEN-1] = '\0';  /* Make sure the string is terminated. */ \
   2148    VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf)); \
   2149 })
   2150 
   2151 // Nb: uses a static buffer, each call trashes the last string returned.
   2152 static Char* make_perc(double x)
   2153 {
   2154    static Char mbuf[32];
   2155 
   2156    VG_(percentify)((ULong)(x * 100), 10000, 2, 6, mbuf);
   2157    // XXX: this is bogus if the denominator was zero -- resulting string is
    2158    // something like "0 --%".
   2159    if (' ' == mbuf[0]) mbuf[0] = '0';
   2160    return mbuf;
   2161 }
   2162 
   2163 static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
   2164                             Int depth_str_len,
   2165                             SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
   2166 {
   2167    Int   i, j, n_insig_children_sxpts;
   2168    SXPt* child = NULL;
   2169 
   2170    // Used for printing function names.  Is made static to keep it out
   2171    // of the stack frame -- this function is recursive.  Obviously this
   2172    // now means its contents are trashed across the recursive call.
   2173    static Char ip_desc_array[BUF_LEN];
   2174    Char* ip_desc = ip_desc_array;
   2175 
   2176    switch (sxpt->tag) {
   2177     case SigSXPt:
   2178       // Print the SXPt itself.
   2179       if (0 == depth) {
   2180          if (clo_heap) {
   2181             ip_desc =
   2182                ( clo_pages_as_heap
   2183                ? "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
   2184                : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc."
   2185                );
   2186          } else {
   2187             // XXX: --alloc-fns?
   2188          }
   2189       } else {
   2190          // If it's main-or-below-main, we (if appropriate) ignore everything
   2191          // below it by pretending it has no children.
   2192          if ( ! VG_(clo_show_below_main) ) {
   2193             Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(sxpt->Sig.ip);
   2194             if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
   2195                sxpt->Sig.n_children = 0;
   2196             }
   2197          }
   2198 
    2199          // We need the -1 to get the line number right, but I'm not sure why.
   2200          ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, ip_desc, BUF_LEN);
   2201       }
   2202 
   2203       // Do the non-ip_desc part first...
   2204       FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);
   2205 
   2206       // For ip_descs beginning with "0xABCD...:" addresses, we first
   2207       // measure the length of the "0xabcd: " address at the start of the
   2208       // ip_desc.
   2209       j = 0;
   2210       if ('0' == ip_desc[0] && 'x' == ip_desc[1]) {
   2211          j = 2;
   2212          while (True) {
   2213             if (ip_desc[j]) {
   2214                if (':' == ip_desc[j]) break;
   2215                j++;
   2216             } else {
   2217                tl_assert2(0, "ip_desc has unexpected form: %s\n", ip_desc);
   2218             }
   2219          }
   2220       }
   2221       // Nb: We treat this specially (ie. we don't use FP) so that if the
   2222       // ip_desc is too long (eg. due to a long C++ function name), it'll
    2223       // get truncated, but the '\n' is still there so it's a valid file.
   2224       // (At one point we were truncating without adding the '\n', which
   2225       // caused bug #155929.)
   2226       //
   2227       // Also, we account for the length of the address in ip_desc when
   2228       // truncating.  (The longest address we could have is 18 chars:  "0x"
   2229       // plus 16 address digits.)  This ensures that the truncated function
   2230       // name always has the same length, which makes truncation
   2231       // deterministic and thus makes testing easier.
   2232       tl_assert(j <= 18);
   2233       VG_(snprintf)(FP_buf, BUF_LEN, "%s\n", ip_desc);
    2234       FP_buf[BUF_LEN-18+j-5] = '.';    // "..." at the end makes the
   2235       FP_buf[BUF_LEN-18+j-4] = '.';    //   truncation more obvious.
   2236       FP_buf[BUF_LEN-18+j-3] = '.';
   2237       FP_buf[BUF_LEN-18+j-2] = '\n';   // The last char is '\n'.
   2238       FP_buf[BUF_LEN-18+j-1] = '\0';   // The string is terminated.
   2239       VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf));
   2240 
   2241       // Indent.
   2242       tl_assert(depth+1 < depth_str_len-1);    // -1 for end NUL char
   2243       depth_str[depth+0] = ' ';
   2244       depth_str[depth+1] = '\0';
   2245 
   2246       // Sort SXPt's children by szB (reverse order:  biggest to smallest).
   2247       // Nb: we sort them here, rather than earlier (eg. in dup_XTree), for
   2248       // two reasons.  First, if we do it during dup_XTree, it can get
   2249       // expensive (eg. 15% of execution time for konqueror
   2250       // startup/shutdown).  Second, this way we get the Insig SXPt (if one
   2251       // is present) in its sorted position, not at the end.
   2252       VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
   2253                  SXPt_revcmp_szB);
   2254 
   2255       // Print the SXPt's children.  They should already be in sorted order.
   2256       n_insig_children_sxpts = 0;
   2257       for (i = 0; i < sxpt->Sig.n_children; i++) {
   2258          child = sxpt->Sig.children[i];
   2259 
   2260          if (InsigSXPt == child->tag)
   2261             n_insig_children_sxpts++;
   2262 
   2263          // Ok, print the child.  NB: contents of ip_desc_array will be
   2264          // trashed by this recursive call.  Doesn't matter currently,
   2265          // but worth noting.
   2266          pp_snapshot_SXPt(fd, child, depth+1, depth_str, depth_str_len,
   2267             snapshot_heap_szB, snapshot_total_szB);
   2268       }
   2269 
   2270       // Unindent.
   2271       depth_str[depth+0] = '\0';
   2272       depth_str[depth+1] = '\0';
   2273 
   2274       // There should be 0 or 1 Insig children SXPts.
   2275       tl_assert(n_insig_children_sxpts <= 1);
   2276       break;
   2277 
   2278     case InsigSXPt: {
   2279       Char* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
   2280       FP("%sn0: %lu in %d place%s below massif's threshold (%s)\n",
   2281          depth_str, sxpt->szB, sxpt->Insig.n_xpts, s,
   2282          make_perc(clo_threshold));
   2283       break;
   2284     }
   2285 
   2286     default:
   2287       tl_assert2(0, "pp_snapshot_SXPt: unrecognised SXPt tag");
   2288    }
   2289 }
   2290 
   2291 static void pp_snapshot(Int fd, Snapshot* snapshot, Int snapshot_n)
   2292 {
   2293    sanity_check_snapshot(snapshot);
   2294 
   2295    FP("#-----------\n");
   2296    FP("snapshot=%d\n", snapshot_n);
   2297    FP("#-----------\n");
   2298    FP("time=%lld\n",            snapshot->time);
   2299    FP("mem_heap_B=%lu\n",       snapshot->heap_szB);
   2300    FP("mem_heap_extra_B=%lu\n", snapshot->heap_extra_szB);
   2301    FP("mem_stacks_B=%lu\n",     snapshot->stacks_szB);
   2302 
   2303    if (is_detailed_snapshot(snapshot)) {
   2304       // Detailed snapshot -- print heap tree.
   2305       Int   depth_str_len = clo_depth + 3;
   2306       Char* depth_str = VG_(malloc)("ms.main.pps.1",
   2307                                     sizeof(Char) * depth_str_len);
   2308       SizeT snapshot_total_szB =
   2309          snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
   2310       depth_str[0] = '\0';   // Initialise depth_str to "".
   2311 
   2312       FP("heap_tree=%s\n", ( Peak == snapshot->kind ? "peak" : "detailed" ));
   2313       pp_snapshot_SXPt(fd, snapshot->alloc_sxpt, 0, depth_str,
   2314                        depth_str_len, snapshot->heap_szB,
   2315                        snapshot_total_szB);
   2316 
   2317       VG_(free)(depth_str);
   2318 
   2319    } else {
   2320       FP("heap_tree=empty\n");
   2321    }
   2322 }
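
         // Eg. (illustrative values): a non-detailed snapshot appears in the
         // output file as:
         //
         //   #-----------
         //   snapshot=3
         //   #-----------
         //   time=1646363
         //   mem_heap_B=2064
         //   mem_heap_extra_B=24
         //   mem_stacks_B=0
         //   heap_tree=empty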
   2323 
   2324 static void write_snapshots_to_file(Char* massif_out_file,
   2325                                     Snapshot snapshots_array[],
   2326                                     Int nr_elements)
   2327 {
   2328    Int i, fd;
   2329    SysRes sres;
   2330 
   2331    sres = VG_(open)(massif_out_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
   2332                                      VKI_S_IRUSR|VKI_S_IWUSR);
   2333    if (sr_isError(sres)) {
   2334       // If the file can't be opened for whatever reason (conflict
    2335       // between multiple massif'd processes?), give up now.
   2336       VG_(umsg)("error: can't open output file '%s'\n", massif_out_file );
   2337       VG_(umsg)("       ... so profiling results will be missing.\n");
   2338       return;
   2339    } else {
   2340       fd = sr_Res(sres);
   2341    }
   2342 
   2343    // Print massif-specific options that were used.
   2344    // XXX: is it worth having a "desc:" line?  Could just call it "options:"
   2345    // -- this file format isn't as generic as Cachegrind's, so the
   2346    // implied genericity of "desc:" is bogus.
   2347    FP("desc:");
   2348    for (i = 0; i < VG_(sizeXA)(args_for_massif); i++) {
   2349       Char* arg = *(Char**)VG_(indexXA)(args_for_massif, i);
   2350       FP(" %s", arg);
   2351    }
   2352    if (0 == i) FP(" (none)");
   2353    FP("\n");
   2354 
   2355    // Print "cmd:" line.
   2356    FP("cmd: ");
   2357    if (VG_(args_the_exename)) {
   2358       FP("%s", VG_(args_the_exename));
   2359       for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
   2360          HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
   2361          if (arg)
   2362             FP(" %s", arg);
   2363       }
   2364    } else {
   2365       FP(" ???");
   2366    }
   2367    FP("\n");
   2368 
   2369    FP("time_unit: %s\n", TimeUnit_to_string(clo_time_unit));
   2370 
   2371    for (i = 0; i < nr_elements; i++) {
   2372       Snapshot* snapshot = & snapshots_array[i];
   2373       pp_snapshot(fd, snapshot, i);     // Detailed snapshot!
   2374    }
   2375    VG_(close) (fd);
   2376 }
   2377 
   2378 static void write_snapshots_array_to_file(void)
   2379 {
   2380    // Setup output filename.  Nb: it's important to do this now, ie. as late
   2381    // as possible.  If we do it at start-up and the program forks and the
   2382    // output file format string contains a %p (pid) specifier, both the
   2383    // parent and child will incorrectly write to the same file;  this
   2384    // happened in 3.3.0.
   2385    Char* massif_out_file =
   2386       VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);
   2387    write_snapshots_to_file (massif_out_file, snapshots, next_snapshot_i);
   2388    VG_(free)(massif_out_file);
   2389 }
   2390 
   2391 static void handle_snapshot_monitor_command (Char *filename, Bool detailed)
   2392 {
   2393    Snapshot snapshot;
   2394 
   2395    clear_snapshot(&snapshot, /* do_sanity_check */ False);
   2396    take_snapshot(&snapshot, Normal, get_time(), detailed);
   2397    write_snapshots_to_file ((filename == NULL) ? (Char*) "massif.vgdb.out" : filename,
   2398                             &snapshot,
   2399                             1);
   2400    delete_snapshot(&snapshot);
   2401 }
   2402 
   2403 static Bool handle_gdb_monitor_command (ThreadId tid, Char *req)
   2404 {
   2405    Char* wcmd;
    2406    Char s[VG_(strlen)(req) + 1]; /* copy for strtok_r; +1 for the NUL */
   2407    Char *ssaveptr;
   2408 
   2409    VG_(strcpy) (s, req);
   2410 
   2411    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
   2412    switch (VG_(keyword_id) ("help snapshot detailed_snapshot",
   2413                             wcmd, kwd_report_duplicated_matches)) {
   2414    case -2: /* multiple matches */
   2415       return True;
   2416    case -1: /* not found */
   2417       return False;
   2418    case  0: /* help */
   2419       print_monitor_help();
   2420       return True;
   2421    case  1: { /* snapshot */
   2422       Char* filename;
   2423       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
   2424       handle_snapshot_monitor_command (filename, False /* detailed */);
   2425       return True;
   2426    }
   2427    case  2: { /* detailed_snapshot */
   2428       Char* filename;
   2429       filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
   2430       handle_snapshot_monitor_command (filename, True /* detailed */);
   2431       return True;
   2432    }
   2433    default:
   2434       tl_assert(0);
   2435       return False;
   2436    }
   2437 }
   2438 
   2439 //------------------------------------------------------------//
   2440 //--- Finalisation                                         ---//
   2441 //------------------------------------------------------------//
   2442 
   2443 static void ms_fini(Int exit_status)
   2444 {
   2445    // Output.
   2446    write_snapshots_array_to_file();
   2447 
   2448    // Stats
   2449    tl_assert(n_xpts > 0);  // always have alloc_xpt
   2450    STATS("heap allocs:           %u\n", n_heap_allocs);
   2451    STATS("heap reallocs:         %u\n", n_heap_reallocs);
   2452    STATS("heap frees:            %u\n", n_heap_frees);
   2453    STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
   2454    STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
   2455    STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
   2456    STATS("stack allocs:          %u\n", n_stack_allocs);
   2457    STATS("stack frees:           %u\n", n_stack_frees);
   2458    STATS("XPts:                  %u\n", n_xpts);
   2459    STATS("top-XPts:              %u (%d%%)\n",
   2460       alloc_xpt->n_children,
   2461       ( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
   2462    STATS("XPt init expansions:   %u\n", n_xpt_init_expansions);
   2463    STATS("XPt later expansions:  %u\n", n_xpt_later_expansions);
   2464    STATS("SXPt allocs:           %u\n", n_sxpt_allocs);
   2465    STATS("SXPt frees:            %u\n", n_sxpt_frees);
   2466    STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
   2467    STATS("real snapshots:        %u\n", n_real_snapshots);
   2468    STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
   2469    STATS("peak snapshots:        %u\n", n_peak_snapshots);
   2470    STATS("cullings:              %u\n", n_cullings);
   2471    STATS("XCon redos:            %u\n", n_XCon_redos);
   2472 }
   2473 
   2474 
   2475 //------------------------------------------------------------//
   2476 //--- Initialisation                                       ---//
   2477 //------------------------------------------------------------//
   2478 
   2479 static void ms_post_clo_init(void)
   2480 {
   2481    Int i;
   2482    Char* LD_PRELOAD_val;
   2483    Char* s;
   2484    Char* s2;
   2485 
   2486    // Check options.
   2487    if (clo_pages_as_heap) {
   2488       if (clo_stacks) {
   2489          VG_(fmsg_bad_option)(
   2490             "--pages-as-heap=yes together with --stacks=yes", "");
   2491       }
   2492    }
   2493    if (!clo_heap) {
   2494       clo_pages_as_heap = False;
   2495    }
   2496 
   2497    // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
   2498    // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
   2499    // platform-equivalent).  We replace it entirely with spaces because then
   2500    // the linker doesn't complain (it does complain if we just change the name
   2501    // to a bogus file).  This is a bit of a hack, but LD_PRELOAD is setup well
   2502    // before tool initialisation, so this seems the best way to do it.
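            // Eg. (illustrative paths): an LD_PRELOAD of
            //   ".../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so"
            // becomes
            //   ".../vgpreload_core-amd64-linux.so:                                  "
            // ie. the massif entry is overwritten with spaces, not removed.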
   2503    if (clo_pages_as_heap) {
   2504       clo_heap_admin = 0;     // No heap admin on pages.
   2505 
   2506       LD_PRELOAD_val = VG_(getenv)( (Char*)VG_(LD_PRELOAD_var_name) );
   2507       tl_assert(LD_PRELOAD_val);
   2508 
   2509       // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
   2510       s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
   2511       tl_assert(s2);
   2512 
   2513       // Now find the vgpreload_massif-$PLATFORM entry.
   2514       s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
   2515       tl_assert(s2);
   2516 
   2517       // Blank out everything to the previous ':', which must be there because
   2518       // of the preceding vgpreload_core-$PLATFORM entry.
   2519       for (s = s2; *s != ':'; s--) {
   2520          *s = ' ';
   2521       }
   2522 
   2523       // Blank out everything to the end of the entry, which will be '\0' if
   2524       // LD_PRELOAD was empty before Valgrind started, or ':' otherwise.
   2525       for (s = s2; *s != ':' && *s != '\0'; s++) {
   2526          *s = ' ';
   2527       }
   2528    }
   2529 
   2530    // Print alloc-fns and ignore-fns, if necessary.
   2531    if (VG_(clo_verbosity) > 1) {
   2532       VERB(1, "alloc-fns:\n");
   2533       for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
   2534          Char** fn_ptr = VG_(indexXA)(alloc_fns, i);
   2535          VERB(1, "  %s\n", *fn_ptr);
   2536       }
   2537 
   2538       VERB(1, "ignore-fns:\n");
   2539       if (0 == VG_(sizeXA)(ignore_fns)) {
   2540          VERB(1, "  <empty>\n");
   2541       }
   2542       for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
   2543          Char** fn_ptr = VG_(indexXA)(ignore_fns, i);
   2544          VERB(1, "  %d: %s\n", i, *fn_ptr);
   2545       }
   2546    }
   2547 
   2548    // Events to track.
   2549    if (clo_stacks) {
   2550       VG_(track_new_mem_stack)        ( new_mem_stack        );
   2551       VG_(track_die_mem_stack)        ( die_mem_stack        );
   2552       VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
   2553       VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
   2554    }
   2555 
   2556    if (clo_pages_as_heap) {
   2557       VG_(track_new_mem_startup) ( ms_new_mem_startup );
   2558       VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
   2559       VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );
   2560 
   2561       VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );
   2562 
   2563       VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
   2564       VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
   2565    }
   2566 
   2567    // Initialise snapshot array, and sanity-check it.
   2568    snapshots = VG_(malloc)("ms.main.mpoci.1",
   2569                            sizeof(Snapshot) * clo_max_snapshots);
    2570    // We don't want to do snapshot sanity checks here, because the
    2571    // snapshots are currently uninitialised.
   2572    for (i = 0; i < clo_max_snapshots; i++) {
   2573       clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
   2574    }
   2575    sanity_check_snapshots_array();
   2576 }
   2577 
   2578 static void ms_pre_clo_init(void)
   2579 {
   2580    VG_(details_name)            ("Massif");
   2581    VG_(details_version)         (NULL);
   2582    VG_(details_description)     ("a heap profiler");
   2583    VG_(details_copyright_author)(
   2584       "Copyright (C) 2003-2012, and GNU GPL'd, by Nicholas Nethercote");
   2585    VG_(details_bug_reports_to)  (VG_BUGS_TO);
   2586 
   2587    VG_(details_avg_translation_sizeB) ( 330 );
   2588 
   2589    // Basic functions.
   2590    VG_(basic_tool_funcs)          (ms_post_clo_init,
   2591                                    ms_instrument,
   2592                                    ms_fini);
   2593 
   2594    // Needs.
   2595    VG_(needs_libc_freeres)();
   2596    VG_(needs_command_line_options)(ms_process_cmd_line_option,
   2597                                    ms_print_usage,
   2598                                    ms_print_debug_usage);
   2599    VG_(needs_client_requests)     (ms_handle_client_request);
   2600    VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
   2601                                    ms_expensive_sanity_check);
   2602    VG_(needs_malloc_replacement)  (ms_malloc,
   2603                                    ms___builtin_new,
   2604                                    ms___builtin_vec_new,
   2605                                    ms_memalign,
   2606                                    ms_calloc,
   2607                                    ms_free,
   2608                                    ms___builtin_delete,
   2609                                    ms___builtin_vec_delete,
   2610                                    ms_realloc,
   2611                                    ms_malloc_usable_size,
   2612                                    0 );
   2613 
   2614    // HP_Chunks.
   2615    malloc_list = VG_(HT_construct)( "Massif's malloc list" );
   2616 
   2617    // Dummy node at top of the context structure.
   2618    alloc_xpt = new_XPt(/*ip*/0, /*parent*/NULL);
   2619 
   2620    // Initialise alloc_fns and ignore_fns.
   2621    init_alloc_fns();
   2622    init_ignore_fns();
   2623 
   2624    // Initialise args_for_massif.
   2625    args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
   2626                                 VG_(free), sizeof(HChar*));
   2627 }
   2628 
   2629 VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)
   2630 
   2631 //--------------------------------------------------------------------//
   2632 //--- end                                                          ---//
   2633 //--------------------------------------------------------------------//
   2634