      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- The address space manager: segment initialisation and        ---*/
      4 /*--- tracking, stack operations                                   ---*/
      5 /*---                                                              ---*/
      6 /*--- Implementation for Linux (and Darwin!)   m_aspacemgr-linux.c ---*/
      7 /*--------------------------------------------------------------------*/
      8 
      9 /*
     10    This file is part of Valgrind, a dynamic binary instrumentation
     11    framework.
     12 
     13    Copyright (C) 2000-2010 Julian Seward
     14       jseward@acm.org
     15 
     16    This program is free software; you can redistribute it and/or
     17    modify it under the terms of the GNU General Public License as
     18    published by the Free Software Foundation; either version 2 of the
     19    License, or (at your option) any later version.
     20 
     21    This program is distributed in the hope that it will be useful, but
     22    WITHOUT ANY WARRANTY; without even the implied warranty of
     23    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     24    General Public License for more details.
     25 
     26    You should have received a copy of the GNU General Public License
     27    along with this program; if not, write to the Free Software
     28    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     29    02111-1307, USA.
     30 
     31    The GNU General Public License is contained in the file COPYING.
     32 */
     33 
     34 #if defined(VGO_linux) || defined(VGO_darwin)
     35 
     36 /* *************************************************************
     37    DO NOT INCLUDE ANY OTHER FILES HERE.
     38    ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
     39    AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
     40    ************************************************************* */
     41 
     42 #include "priv_aspacemgr.h"
     43 #include "config.h"
     44 
     45 
     46 /* Note: many of the exported functions implemented below are
     47    described more fully in comments in pub_core_aspacemgr.h.
     48 */
     49 
     50 
     51 /*-----------------------------------------------------------------*/
     52 /*---                                                           ---*/
     53 /*--- Overview.                                                 ---*/
     54 /*---                                                           ---*/
     55 /*-----------------------------------------------------------------*/
     56 
     57 /* Purpose
     58    ~~~~~~~
     59    The purpose of the address space manager (aspacem) is:
     60 
     61    (1) to record the disposition of all parts of the process' address
     62        space at all times.
     63 
     64    (2) to the extent that it can, influence layout in ways favourable
     65        to our purposes.
     66 
     67    It is important to appreciate that whilst it can and does attempt
     68    to influence layout, and usually succeeds, it isn't possible to
     69    impose absolute control: in the end, the kernel is the final
     70    arbiter, and can always bounce our requests.
     71 
     72    Strategy
     73    ~~~~~~~~
     74    The strategy is therefore as follows:
     75 
     76    * Track ownership of mappings.  Each one can belong either to
     77      Valgrind or to the client.
     78 
     79    * Try to place the client's fixed and hinted mappings at the
     80      requested addresses.  Fixed mappings are allowed anywhere except
     81      in areas reserved by Valgrind; the client can trash its own
     82      mappings if it wants.  Hinted mappings are allowed providing they
     83      fall entirely in free areas; if not, they will be placed by
     84      aspacem in a free area.
     85 
     86    * Anonymous mappings are allocated so as to keep Valgrind and
     87      client areas widely separated when possible.  If address space
     88      runs low, then they may become intermingled: aspacem will attempt
     89      to use all possible space.  But under most circumstances lack of
     90      address space is not a problem and so the areas will remain far
     91      apart.
     92 
     93      Searches for client space start at aspacem_cStart and will wrap
     94      around the end of the available space if needed.  Searches for
     95      Valgrind space start at aspacem_vStart and will also wrap around.
     96      Because aspacem_cStart is approximately at the start of the
     97      available space and aspacem_vStart is approximately in the
     98      middle, for the most part the client anonymous mappings will be
     99      clustered towards the start of available space, and Valgrind ones
    100      in the middle.
    101 
    102      The available space is delimited by aspacem_minAddr and
    103      aspacem_maxAddr.  aspacem is flexible and can operate with these
    104      at any (sane) setting.  For 32-bit Linux, aspacem_minAddr is set
    105      to some low-ish value at startup (64M) and aspacem_maxAddr is
    106      derived from the stack pointer at system startup.  This seems a
    107      reliable way to establish the initial boundaries.
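
             A rough picture of the resulting layout (indicative only and
             not to scale -- the kernel has the final say):

               aspacem_minAddr                             aspacem_maxAddr
                  |                                               |
                  | client anon mappings ... | Valgrind anon ...  |
                  ^                          ^
               aspacem_cStart            aspacem_vStart (approx. middle)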
    108 
    109      64-bit Linux is similar except for the important detail that the
    110      upper boundary is set to 32G.  The reason is so that all
    111      anonymous mappings (basically all client data areas) are kept
    112      below 32G, since that is the maximum range that memcheck can
    113      track shadow memory using a fast 2-level sparse array.  It can go
    114      beyond that but runs much more slowly.  The 32G limit is
    115      arbitrary and is trivially changed.  So, with the current
    116      settings, programs on 64-bit Linux will appear to run out of
    117      address space and presumably fail at the 32G limit.  Given the
    118      9/8 space overhead of Memcheck, that means you should be able to
    119      memcheckify programs that use up to about 14G natively.
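             (As a rough sanity check on that figure: client data x plus
             shadow of about (9/8)x gives (17/8)x <= 32G, i.e. x <= ~15G;
             allowing some slack for Valgrind's own mappings leaves
             roughly 14G.)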
    120 
    121    Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
    122    anonymous mappings.  The client can still do fixed and hinted maps
    123    at any addresses provided they do not overlap Valgrind's segments.
    124    This makes Valgrind able to load prelinked .so's at their requested
    125    addresses on 64-bit platforms, even if they are very high (eg,
    126    112TB).
    127 
    128    At startup, aspacem establishes the usable limits, and advises
    129    m_main to place the client stack at the top of the range, which on
    130    a 32-bit machine will be just below the real initial stack.  One
    131    effect of this is that self-hosting sort-of works, because an inner
    132    valgrind will then place its client's stack just below its own
    133    initial stack.
    134 
    135    The segment array and segment kinds
    136    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    137    The central data structure is the segment array (segments[0
    138    .. nsegments_used-1]).  This covers the entire address space in
    139    order, giving account of every byte of it.  Free spaces are
    140    represented explicitly as this makes many operations simpler.
    141    Mergeable adjacent segments are aggressively merged so as to create
    142    a "normalised" representation (preen_nsegments).
    143 
    144    There are 7 (mutually-exclusive) segment kinds, the meaning of
    145    which is important:
    146 
    147    SkFree: a free space, which may be allocated either to Valgrind (V)
    148       or the client (C).
    149 
    150    SkAnonC: an anonymous mapping belonging to C.  For these, aspacem
    151       tracks a boolean indicating whether or not it is part of the
    152       client's heap area (can't remember why).
    153 
    154    SkFileC: a file mapping belonging to C.
    155 
    156    SkShmC: a shared memory segment belonging to C.
    157 
    158    SkAnonV: an anonymous mapping belonging to V.  These cover all V's
    159       dynamic memory needs, including non-client malloc/free areas,
    160       shadow memory, and the translation cache.
    161 
    162    SkFileV: a file mapping belonging to V.  As far as I know these are
    163       only created transiently for the purposes of reading debug info.
    164 
    165    SkResvn: a reservation segment.
    166 
    167    These are mostly straightforward.  Reservation segments have some
    168    subtlety, however.
    169 
    170    A reservation segment is unmapped from the kernel's point of view,
    171    but is an area in which aspacem will not create anonymous maps
    172    (either Vs or Cs).  The idea is that we will try to keep it clear
    173    when the choice to do so is ours.  Reservation segments are
    174    'invisible' from the client's point of view: it may choose to park
    175    a fixed mapping in the middle of one, and that's just tough -- we
    176    can't do anything about that.  From the client's perspective
    177    reservations are semantically equivalent to (although
    178    distinguishable from, if it makes enquiries) free areas.
    179 
    180    Reservations are a primitive mechanism provided for whatever
    181    purposes the rest of the system wants.  Currently they are used to
    182    reserve the expansion space into which a growdown stack is
    183    expanded, and into which the data segment is extended.  Note,
    184    though, those uses are entirely external to this module, which only
    185    supplies the primitives.
    186 
    187    Reservations may be shrunk in order that an adjoining anonymous
    188    mapping may be extended.  This makes dataseg/stack expansion work.
    189    A reservation may not be shrunk below one page.
    190 
    191    The advise/notify concept
    192    ~~~~~~~~~~~~~~~~~~~~~~~~~
    193    All mmap-related calls must be routed via aspacem.  Calling
    194    sys_mmap directly from the rest of the system is very dangerous
    195    because aspacem's data structures will become out of date.
    196 
    197    The fundamental mode of operation of aspacem is to support client
    198    mmaps.  Here's what happens (in ML_(generic_PRE_sys_mmap)):
    199 
    200    * m_syswrap intercepts the mmap call.  It examines the parameters
    201      and identifies the requested placement constraints.  There are
    202      three possibilities: no constraint (MAny), hinted (MHint, "I
    203      prefer X but will accept anything"), and fixed (MFixed, "X or
    204      nothing").
    205 
    206    * This request is passed to VG_(am_get_advisory).  This decides on
    207      a placement as described in detail in Strategy above.  It may
    208      also indicate that the map should fail, because it would trash
    209      one of Valgrind's areas, which would probably kill the system.
    210 
    211    * Control returns to the wrapper.  If VG_(am_get_advisory) has
    212      declared that the map should fail, then it must be made to do so.
    213      Usually, though, the request is considered acceptable, in which
    214      case an "advised" address is supplied.  The advised address
    215      replaces the original address supplied by the client, and
    216      MAP_FIXED is set.
    217 
    218      Note at this point that although aspacem has been asked for
    219      advice on where to place the mapping, no commitment has yet been
    220      made by either it or the kernel.
    221 
    222    * The adjusted request is handed off to the kernel.
    223 
    224    * The kernel's result is examined.  If the map succeeded, aspacem
    225      is told of the outcome (VG_(am_notify_client_mmap)), so it can
    226      update its records accordingly.
    227 
    228   This then is the central advise-notify idiom for handling client
    229   mmap/munmap/mprotect/shmat:
    230 
    231   * ask aspacem for an advised placement (or a veto)
    232 
    233   * if not vetoed, hand request to kernel, using the advised placement
    234 
    235   * examine result, and if successful, notify aspacem of the result.
    236 
    237   There are also many convenience functions, eg
    238   VG_(am_mmap_anon_fixed_client), which do both phases entirely within
    239   aspacem.
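
          To make the idiom concrete: a client mmap wrapper has roughly
          the following shape.  This is only a sketch (argument lists
          abbreviated, declarations and error handling omitted), not the
          real code in ML_(generic_PRE_sys_mmap):

             advised = VG_(am_get_advisory)( &mreq, True, &ok );
             if (!ok) {
                .. vetoed: make the client's mmap fail ..
             } else {
                sres = VG_(am_do_mmap_NO_NOTIFY)( advised, len, prot,
                                                  flags | VKI_MAP_FIXED,
                                                  fd, offset );
                if (!sr_isError(sres))
                   VG_(am_notify_client_mmap)( sr_Res(sres), len, prot,
                                               flags, fd, offset );
             }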
    240 
    241   To debug all this, a sync-checker is provided.  It reads
    242   /proc/self/maps, compares what it sees with aspacem's records, and
    243   complains if there is a difference.  --sanity-level=3 runs it before
    244   and after each syscall, which is a powerful, if slow, way of finding
    245   buggy syscall wrappers.
    246 
    247   Loss of pointercheck
    248   ~~~~~~~~~~~~~~~~~~~~
    249   Up to and including Valgrind 2.4.1, x86 segmentation was used to
    250   enforce separation of V and C, so that wild writes by C could not
    251   trash V.  This got called "pointercheck".  Unfortunately, the new
    252   more flexible memory layout, plus the need to be portable across
    253   different architectures, means doing this in hardware is no longer
    254   viable, and doing it in software is expensive.  So at the moment we
    255   don't do it at all.
    256 */
    257 
    258 
    259 /*-----------------------------------------------------------------*/
    260 /*---                                                           ---*/
    261 /*--- The Address Space Manager's state.                        ---*/
    262 /*---                                                           ---*/
    263 /*-----------------------------------------------------------------*/
    264 
    265 /* ------ start of STATE for the address-space manager ------ */
    266 
    267 /* Max number of segments we can track. */
    268 #if defined(VGO_darwin) || defined(ANDROID)
    269 #define VG_N_SEGMENTS 5000
    270 #else
    271 #define VG_N_SEGMENTS 100000
    272 #endif
    273 
    274 /* Max number of segment file names we can track. */
    275 #if defined(VGO_darwin) || defined(ANDROID)
    276 #define VG_N_SEGNAMES 1000
    277 #else
    278 #define VG_N_SEGNAMES 100000
    279 #endif
    280 
    281 /* Max length of a segment file name. */
    282 #define VG_MAX_SEGNAMELEN 1000
    283 
    284 
    285 typedef
    286    struct {
    287       Bool  inUse;
    288       Bool  mark;
    289       HChar fname[VG_MAX_SEGNAMELEN];
    290    }
    291    SegName;
    292 
    293 /* Filename table.  _used is the high water mark; an entry is only
    294    valid if its index >= 0, < _used, and its .inUse field == True.
    295    The .mark field is used to garbage-collect dead entries.
    296 */
    297 static SegName segnames[VG_N_SEGNAMES];
    298 static Int     segnames_used = 0;
    299 
    300 
    301 /* Array [0 .. nsegments_used-1] of all mappings. */
    302 /* Sorted by .addr field. */
    303 /* I: len may not be zero. */
    304 /* I: overlapping segments are not allowed. */
    305 /* I: the segments cover the entire address space precisely. */
    306 /* Each segment can optionally hold an index into the filename table. */
    307 
    308 static NSegment nsegments[VG_N_SEGMENTS];
    309 static Int      nsegments_used = 0;
    310 
    311 #define Addr_MIN ((Addr)0)
    312 #define Addr_MAX ((Addr)(-1ULL))
    313 
    314 /* Limits etc */
    315 
    316 // The smallest address that aspacem will try to allocate
    317 static Addr aspacem_minAddr = 0;
    318 
    319 // The largest address that aspacem will try to allocate
    320 static Addr aspacem_maxAddr = 0;
    321 
    322 // Where aspacem will start looking for client space
    323 static Addr aspacem_cStart = 0;
    324 
    325 // Where aspacem will start looking for Valgrind space
    326 static Addr aspacem_vStart = 0;
    327 
    328 
    329 #define AM_SANITY_CHECK                                      \
    330    do {                                                      \
    331       if (VG_(clo_sanity_level) >= 3)                        \
    332          aspacem_assert(VG_(am_do_sync_check)                \
    333             (__PRETTY_FUNCTION__,__FILE__,__LINE__));        \
    334    } while (0)
    335 
    336 /* ------ end of STATE for the address-space manager ------ */
    337 
    338 /* ------ Forwards decls ------ */
    339 inline
    340 static Int  find_nsegment_idx ( Addr a );
    341 
    342 static void parse_procselfmaps (
    343       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
    344                               ULong dev, ULong ino, Off64T offset,
    345                               const UChar* filename ),
    346       void (*record_gap)( Addr addr, SizeT len )
    347    );
    348 
    349 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
    350 /* Not that I have anything against the commpage per se.  It's just
    351    that it's not listed in /proc/self/maps, which is a royal PITA --
    352    we have to fake it up, in parse_procselfmaps.
    353 
    354    But note also bug 254556 comment #2: this is now fixed in newer
    355    kernels -- it is listed as a "[vectors]" entry.  Presumably the
    356    fake entry made here duplicates the [vectors] entry, and so, if at
    357    some point in the future, we can stop supporting buggy kernels,
    358    then this kludge can be removed entirely, since the procmap parser
    359    below will read that entry in the normal way. */
    360 #if defined(VGP_arm_linux)
    361 #  define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
    362 #  define ARM_LINUX_FAKE_COMMPAGE_END1  0xFFFF1000
    363 #endif
    364 
    365 
    366 /*-----------------------------------------------------------------*/
    367 /*---                                                           ---*/
    368 /*--- SegName array management.                                 ---*/
    369 /*---                                                           ---*/
    370 /*-----------------------------------------------------------------*/
    371 
    372 /* Searches the filename table to find an index for the given name.
    373    If none is found, an index is allocated and the name stored.  If no
    374    space is available we just give up.  If the string is too long to
    375    store, return -1.
    376 */
    377 static Int allocate_segname ( const HChar* name )
    378 {
    379    Int i, j, len;
    380 
    381    aspacem_assert(name);
    382 
    383    if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
    384 
    385    len = VG_(strlen)(name);
    386    if (len >= VG_MAX_SEGNAMELEN-1) {
    387       return -1;
    388    }
    389 
    390    /* first see if we already have the name. */
    391    for (i = 0; i < segnames_used; i++) {
    392       if (!segnames[i].inUse)
    393          continue;
    394       if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
    395          return i;
    396       }
    397    }
    398 
    399    /* no we don't.  So look for a free slot. */
    400    for (i = 0; i < segnames_used; i++)
    401       if (!segnames[i].inUse)
    402          break;
    403 
    404    if (i == segnames_used) {
    405       /* no free slots .. advance the high-water mark. */
    406       if (segnames_used+1 < VG_N_SEGNAMES) {
    407          i = segnames_used;
    408          segnames_used++;
    409       } else {
    410          ML_(am_barf_toolow)("VG_N_SEGNAMES");
    411       }
    412    }
    413 
    414    /* copy it in */
    415    segnames[i].inUse = True;
    416    for (j = 0; j < len; j++)
    417       segnames[i].fname[j] = name[j];
    418    aspacem_assert(len < VG_MAX_SEGNAMELEN);
    419    segnames[i].fname[len] = 0;
    420    return i;
    421 }
    422 
    423 
    424 /*-----------------------------------------------------------------*/
    425 /*---                                                           ---*/
    426 /*--- Displaying the segment array.                             ---*/
    427 /*---                                                           ---*/
    428 /*-----------------------------------------------------------------*/
    429 
    430 static HChar* show_SegKind ( SegKind sk )
    431 {
    432    switch (sk) {
    433       case SkFree:  return "    ";
    434       case SkAnonC: return "anon";
    435       case SkAnonV: return "ANON";
    436       case SkFileC: return "file";
    437       case SkFileV: return "FILE";
    438       case SkShmC:  return "shm ";
    439       case SkResvn: return "RSVN";
    440       default:      return "????";
    441    }
    442 }
    443 
    444 static HChar* show_ShrinkMode ( ShrinkMode sm )
    445 {
    446    switch (sm) {
    447       case SmLower: return "SmLower";
    448       case SmUpper: return "SmUpper";
    449       case SmFixed: return "SmFixed";
    450       default: return "Sm?????";
    451    }
    452 }
    453 
    454 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
    455 {
    456    HChar* fmt;
    457    ULong len = ((ULong)end) - ((ULong)start) + 1;
    458 
    459    if (len < 10*1000*1000ULL) {
    460       fmt = "%7llu";
    461    }
    462    else if (len < 999999ULL * (1ULL<<20)) {
    463       fmt = "%6llum";
    464       len >>= 20;
    465    }
    466    else if (len < 999999ULL * (1ULL<<30)) {
    467       fmt = "%6llug";
    468       len >>= 30;
    469    }
    470    else if (len < 999999ULL * (1ULL<<40)) {
    471       fmt = "%6llut";
    472       len >>= 40;
    473    }
    474    else {
    475       fmt = "%6llue";
    476       len >>= 50;
    477    }
    478    ML_(am_sprintf)(buf, fmt, len);
    479 }
    480 
    481 
    482 /* Show full details of an NSegment */
    483 
    484 static void __attribute__ ((unused))
    485             show_nsegment_full ( Int logLevel, Int segNo, NSegment* seg )
    486 {
    487    HChar len_buf[20];
    488    HChar* name = "(none)";
    489 
    490    if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
    491                        && segnames[seg->fnIdx].inUse
    492                        && segnames[seg->fnIdx].fname[0] != 0)
    493       name = segnames[seg->fnIdx].fname;
    494 
    495    show_len_concisely(len_buf, seg->start, seg->end);
    496 
    497    VG_(debugLog)(
    498       logLevel, "aspacem",
    499       "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
    500       "d=0x%03llx i=%-7lld o=%-7lld (%d) m=%d %s\n",
    501       segNo, show_SegKind(seg->kind),
    502       (ULong)seg->start, (ULong)seg->end, len_buf,
    503       seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    504       seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    505       seg->isCH ? 'H' : '-',
    506       show_ShrinkMode(seg->smode),
    507       seg->dev, seg->ino, seg->offset, seg->fnIdx,
    508       (Int)seg->mark, name
    509    );
    510 }
    511 
    512 
    513 /* Show an NSegment in a user-friendly-ish way. */
    514 
    515 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
    516 {
    517    HChar len_buf[20];
    518    show_len_concisely(len_buf, seg->start, seg->end);
    519 
    520    switch (seg->kind) {
    521 
    522       case SkFree:
    523          VG_(debugLog)(
    524             logLevel, "aspacem",
    525             "%3d: %s %010llx-%010llx %s\n",
    526             segNo, show_SegKind(seg->kind),
    527             (ULong)seg->start, (ULong)seg->end, len_buf
    528          );
    529          break;
    530 
    531       case SkAnonC: case SkAnonV: case SkShmC:
    532          VG_(debugLog)(
    533             logLevel, "aspacem",
    534             "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
    535             segNo, show_SegKind(seg->kind),
    536             (ULong)seg->start, (ULong)seg->end, len_buf,
    537             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    538             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    539             seg->isCH ? 'H' : '-'
    540          );
    541          break;
    542 
    543       case SkFileC: case SkFileV:
    544          VG_(debugLog)(
    545             logLevel, "aspacem",
    546             "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
    547             "i=%-7lld o=%-7lld (%d)\n",
    548             segNo, show_SegKind(seg->kind),
    549             (ULong)seg->start, (ULong)seg->end, len_buf,
    550             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    551             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    552             seg->isCH ? 'H' : '-',
    553             seg->dev, seg->ino, seg->offset, seg->fnIdx
    554          );
    555          break;
    556 
    557       case SkResvn:
    558          VG_(debugLog)(
    559             logLevel, "aspacem",
    560             "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
    561             segNo, show_SegKind(seg->kind),
    562             (ULong)seg->start, (ULong)seg->end, len_buf,
    563             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    564             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    565             seg->isCH ? 'H' : '-',
    566             show_ShrinkMode(seg->smode)
    567          );
    568          break;
    569 
    570       default:
    571          VG_(debugLog)(
    572             logLevel, "aspacem",
    573             "%3d: ???? UNKNOWN SEGMENT KIND\n",
    574             segNo
    575          );
    576          break;
    577    }
    578 }
    579 
    580 /* Print out the segment array (debugging only!). */
    581 void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
    582 {
    583    Int i;
    584    VG_(debugLog)(logLevel, "aspacem",
    585                  "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
    586                  who, nsegments_used, segnames_used);
    587    for (i = 0; i < segnames_used; i++) {
    588       if (!segnames[i].inUse)
    589          continue;
    590       VG_(debugLog)(logLevel, "aspacem",
    591                     "(%2d) %s\n", i, segnames[i].fname);
    592    }
    593    for (i = 0; i < nsegments_used; i++)
    594      show_nsegment( logLevel, i, &nsegments[i] );
    595    VG_(debugLog)(logLevel, "aspacem",
    596                  ">>>\n");
    597 }
    598 
    599 
    600 /* Get the filename corresponding to this segment, if known and if it
    601    has one.  The returned name's storage cannot be assumed to be
    602    persistent, so the caller should immediately copy the name
    603    elsewhere. */
    604 HChar* VG_(am_get_filename)( NSegment const * seg )
    605 {
    606    Int i;
    607    aspacem_assert(seg);
    608    i = seg->fnIdx;
    609    if (i < 0 || i >= segnames_used || !segnames[i].inUse)
    610       return NULL;
    611    else
    612       return &segnames[i].fname[0];
    613 }
    614 
    615 /* Collect up the start addresses of all non-free, non-resvn segments.
    616    The interface is a bit strange in order to avoid potential
    617    segment-creation races caused by dynamic allocation of the result
    618    buffer *starts.
    619 
    620    The function first computes how many entries in the result
    621    buffer *starts will be needed.  If this number <= nStarts,
    622    they are placed in starts[0..], and the number is returned.
    623    If nStarts is not large enough, nothing is written to
    624    starts[0..], and the negation of the size is returned.
    625 
    626    Correct use of this function may mean calling it multiple times in
    627    order to establish a suitably-sized buffer. */
    628 
    629 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
    630 {
    631    Int i, j, nSegs;
    632 
    633    /* don't pass dumbass arguments */
    634    aspacem_assert(nStarts >= 0);
    635 
    636    nSegs = 0;
    637    for (i = 0; i < nsegments_used; i++) {
    638       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
    639          continue;
    640       nSegs++;
    641    }
    642 
    643    if (nSegs > nStarts) {
    644       /* The buffer isn't big enough.  Tell the caller how big it needs
    645          to be. */
    646       return -nSegs;
    647    }
    648 
    649    /* There's enough space.  So write into the result buffer. */
    650    aspacem_assert(nSegs <= nStarts);
    651 
    652    j = 0;
    653    for (i = 0; i < nsegments_used; i++) {
    654       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
    655          continue;
    656       starts[j] = nsegments[i].start;
    657       j++;
    658    }
    659 
    660    aspacem_assert(j == nSegs); /* this should not fail */
    661    return nSegs;
    662 }
    663 
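        /* Illustrative only (not part of aspacem): the call-twice pattern
           implied by the contract above.  'my_alloc' stands in for whatever
           allocator the caller happens to have; the real callers live
           outside this module.

              Int   nStarts = 32;
              Addr* starts  = my_alloc( nStarts * sizeof(Addr) );
              Int   r       = VG_(am_get_segment_starts)( starts, nStarts );
              while (r < 0) {
                 nStarts = -r;     .. the required size, negated ..
                 starts  = my_alloc( nStarts * sizeof(Addr) );
                 r       = VG_(am_get_segment_starts)( starts, nStarts );
              }
              .. starts[0 .. r-1] now hold the segment start addresses ..
        */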
    664 
    665 /*-----------------------------------------------------------------*/
    666 /*---                                                           ---*/
    667 /*--- Sanity checking and preening of the segment array.        ---*/
    668 /*---                                                           ---*/
    669 /*-----------------------------------------------------------------*/
    670 
    671 /* Check representational invariants for NSegments. */
    672 
    673 static Bool sane_NSegment ( NSegment* s )
    674 {
    675    if (s == NULL) return False;
    676 
    677    /* No zero sized segments and no wraparounds. */
    678    if (s->start >= s->end) return False;
    679 
    680    /* .mark is used for admin purposes only. */
    681    if (s->mark) return False;
    682 
    683    /* require page alignment */
    684    if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
    685    if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
    686 
    687    switch (s->kind) {
    688 
    689       case SkFree:
    690          return
    691             s->smode == SmFixed
    692             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    693             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
    694             && !s->isCH;
    695 
    696       case SkAnonC: case SkAnonV: case SkShmC:
    697          return
    698             s->smode == SmFixed
    699             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    700             && (s->kind==SkAnonC ? True : !s->isCH);
    701 
    702       case SkFileC: case SkFileV:
    703          return
    704             s->smode == SmFixed
    705             && (s->fnIdx == -1 ||
    706                 (s->fnIdx >= 0 && s->fnIdx < segnames_used
    707                                && segnames[s->fnIdx].inUse))
    708             && !s->isCH;
    709 
    710       case SkResvn:
    711          return
    712             s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    713             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
    714             && !s->isCH;
    715 
    716       default:
    717          return False;
    718    }
    719 }
    720 
    721 
    722 /* Try merging s2 into s1, if possible.  If successful, s1 is
    723    modified, and True is returned.  Otherwise s1 is unchanged and
    724    False is returned. */
    725 
    726 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
    727 {
    728    if (s1->kind != s2->kind)
    729       return False;
    730 
    731    if (s1->end+1 != s2->start)
    732       return False;
    733 
    734    /* reject cases which would cause wraparound */
    735    if (s1->start > s2->end)
    736       return False;
    737 
    738    switch (s1->kind) {
    739 
    740       case SkFree:
    741          s1->end = s2->end;
    742          return True;
    743 
    744       case SkAnonC: case SkAnonV:
    745          if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
    746              && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
    747             s1->end = s2->end;
    748             s1->hasT |= s2->hasT;
    749             return True;
    750          }
    751          break;
    752 
    753       case SkFileC: case SkFileV:
    754          if (s1->hasR == s2->hasR
    755              && s1->hasW == s2->hasW && s1->hasX == s2->hasX
    756              && s1->dev == s2->dev && s1->ino == s2->ino
    757              && s2->offset == s1->offset
    758                               + ((ULong)s2->start) - ((ULong)s1->start) ) {
    759             s1->end = s2->end;
    760             s1->hasT |= s2->hasT;
    761             return True;
    762          }
    763          break;
    764 
    765       case SkShmC:
    766          return False;
    767 
    768       case SkResvn:
    769          if (s1->smode == SmFixed && s2->smode == SmFixed) {
    770             s1->end = s2->end;
    771             return True;
    772          }
    773 
    774       default:
    775          break;
    776 
    777    }
    778 
    779    return False;
    780 }
    781 
    782 
    783 /* Sanity-check and canonicalise the segment array (merge mergable
    784    segments).  Returns True if any segments were merged. */
    785 
    786 static Bool preen_nsegments ( void )
    787 {
    788    Int i, j, r, w, nsegments_used_old = nsegments_used;
    789 
    790    /* Pass 1: check the segment array covers the entire address space
    791       exactly once, and also that each segment is sane. */
    792    aspacem_assert(nsegments_used > 0);
    793    aspacem_assert(nsegments[0].start == Addr_MIN);
    794    aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
    795 
    796    aspacem_assert(sane_NSegment(&nsegments[0]));
    797    for (i = 1; i < nsegments_used; i++) {
    798       aspacem_assert(sane_NSegment(&nsegments[i]));
    799       aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
    800    }
    801 
    802    /* Pass 2: merge as much as possible, using
    803       maybe_merge_nsegments. */
    804    w = 0;
    805    for (r = 1; r < nsegments_used; r++) {
    806       if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
    807          /* nothing */
    808       } else {
    809          w++;
    810          if (w != r)
    811             nsegments[w] = nsegments[r];
    812       }
    813    }
    814    w++;
    815    aspacem_assert(w > 0 && w <= nsegments_used);
    816    nsegments_used = w;
    817 
    818    /* Pass 3: free up unused string table slots */
    819    /* clear mark bits */
    820    for (i = 0; i < segnames_used; i++)
    821       segnames[i].mark = False;
    822    /* mark */
    823    for (i = 0; i < nsegments_used; i++) {
    824      j = nsegments[i].fnIdx;
    825       aspacem_assert(j >= -1 && j < segnames_used);
    826       if (j >= 0) {
    827          aspacem_assert(segnames[j].inUse);
    828          segnames[j].mark = True;
    829       }
    830    }
    831    /* release */
    832    for (i = 0; i < segnames_used; i++) {
    833       if (segnames[i].mark == False) {
    834          segnames[i].inUse = False;
    835          segnames[i].fname[0] = 0;
    836       }
    837    }
    838 
    839    return nsegments_used != nsegments_used_old;
    840 }
    841 
    842 
    843 /* Check the segment array corresponds with the kernel's view of
    844    memory layout.  sync_check_ok returns True if no anomalies were
    845    found, else False.  In the latter case the mismatching segments are
    846    displayed.
    847 
    848    The general idea is: we get the kernel to show us all its segments
    849    and also the gaps in between.  For each such interval, try and find
    850    a sequence of appropriate intervals in our segment array which
    851    cover or more than cover the kernel's interval, and which all have
    852    suitable kinds/permissions etc.
    853 
    854    Although any specific kernel interval is not matched exactly to a
    855    valgrind interval or sequence thereof, eventually any disagreement
    856    on mapping boundaries will be detected.  This is because, if for
    857    example valgrind's intervals cover a greater range than the current
    858    kernel interval, it must be the case that a neighbouring free-space
    859    interval belonging to valgrind cannot cover the neighbouring
    860    free-space interval belonging to the kernel.  So the disagreement
    861    is detected.
    862 
    863    In other words, we examine each kernel interval in turn, and check
    864    we do not disagree over the range of that interval.  Because all of
    865    the address space is examined, any disagreements must eventually be
    866    detected.
    867 */
    868 
    869 static Bool sync_check_ok = False;
    870 
    871 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
    872                                           ULong dev, ULong ino, Off64T offset,
    873                                           const UChar* filename )
    874 {
    875    Int  iLo, iHi, i;
    876    Bool sloppyXcheck;
    877 
    878    /* If a problem has already been detected, don't continue comparing
    879       segments, so as to avoid flooding the output with error
    880       messages. */
    881 #if !defined(VGO_darwin)
    882    /* GrP fixme not */
    883    if (!sync_check_ok)
    884       return;
    885 #endif
    886    if (len == 0)
    887       return;
    888 
    889    /* The kernel should not give us wraparounds. */
    890    aspacem_assert(addr <= addr + len - 1);
    891 
    892    iLo = find_nsegment_idx( addr );
    893    iHi = find_nsegment_idx( addr + len - 1 );
    894 
    895    /* These 5 should be guaranteed by find_nsegment_idx. */
    896    aspacem_assert(0 <= iLo && iLo < nsegments_used);
    897    aspacem_assert(0 <= iHi && iHi < nsegments_used);
    898    aspacem_assert(iLo <= iHi);
    899    aspacem_assert(nsegments[iLo].start <= addr );
    900    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
    901 
    902    /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
    903       most recent NX-bit enabled CPUs) and so recent kernels attempt
    904       to provide execute protection by placing all executable mappings
    905       low down in the address space and then reducing the size of the
    906       code segment to prevent code at higher addresses being executed.
    907 
    908       These kernels report which mappings are really executable in
    909       the /proc/self/maps output rather than mirroring what was asked
    910       for when each mapping was created. In order to cope with this we
    911       have a sloppyXcheck mode which we enable on x86 - in this mode we
    912       allow the kernel to report execute permission when we weren't
    913       expecting it but not vice versa. */
    914 #  if defined(VGA_x86)
    915    sloppyXcheck = True;
    916 #  else
    917    sloppyXcheck = False;
    918 #  endif
    919 
    920    /* NSegments iLo .. iHi inclusive should agree with the presented
    921       data. */
    922    for (i = iLo; i <= iHi; i++) {
    923 
    924       Bool same, cmp_offsets, cmp_devino;
    925       UInt seg_prot;
    926 
    927       /* compare the kernel's offering against ours. */
    928       same = nsegments[i].kind == SkAnonC
    929              || nsegments[i].kind == SkAnonV
    930              || nsegments[i].kind == SkFileC
    931              || nsegments[i].kind == SkFileV
    932              || nsegments[i].kind == SkShmC;
    933 
    934       seg_prot = 0;
    935       if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
    936       if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
    937       if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
    938 
    939       cmp_offsets
    940          = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
    941 
    942       cmp_devino
    943          = nsegments[i].dev != 0 || nsegments[i].ino != 0;
    944 
    945       /* Consider other reasons to not compare dev/inode */
    946 #if defined(VGO_linux)
    947       /* bproc does some godawful hack on /dev/zero at process
    948          migration, which changes the name of it, and its dev & ino */
    949       if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
    950          cmp_devino = False;
    951 
    952       /* hack apparently needed on MontaVista Linux */
    953       if (filename && VG_(strstr)(filename, "/.lib-ro/"))
    954          cmp_devino = False;
    955 #endif
    956 
    957 #if defined(VGO_darwin)
    958       // GrP fixme kernel info doesn't have dev/inode
    959       cmp_devino = False;
    960 
    961       // GrP fixme V and kernel don't agree on offsets
    962       cmp_offsets = False;
    963 #endif
    964 
    965       /* If we are doing sloppy execute permission checks then we
    966          allow segment to have X permission when we weren't expecting
    967          it (but not vice versa) so if the kernel reported execute
    968          permission then pretend that this segment has it regardless
    969          of what we were expecting. */
    970       if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
    971          seg_prot |= VKI_PROT_EXEC;
    972       }
    973 
    974       same = same
    975              && seg_prot == prot
    976              && (cmp_devino
    977                    ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
    978                    : True)
    979              && (cmp_offsets
    980                    ? nsegments[i].start-nsegments[i].offset == addr-offset
    981                    : True);
    982       if (!same) {
    983          Addr start = addr;
    984          Addr end = start + len - 1;
    985          HChar len_buf[20];
    986          show_len_concisely(len_buf, start, end);
    987 
    988          sync_check_ok = False;
    989 
    990          VG_(debugLog)(
    991             0,"aspacem",
    992               "segment mismatch: V's seg 1st, kernel's 2nd:\n");
    993          show_nsegment_full( 0, i, &nsegments[i] );
    994          VG_(debugLog)(0,"aspacem",
    995             "...: .... %010llx-%010llx %s %c%c%c.. ....... "
    996             "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
    997             (ULong)start, (ULong)end, len_buf,
    998             prot & VKI_PROT_READ  ? 'r' : '-',
    999             prot & VKI_PROT_WRITE ? 'w' : '-',
   1000             prot & VKI_PROT_EXEC  ? 'x' : '-',
   1001             dev, ino, offset, filename ? (HChar*)filename : "(none)" );
   1002 
   1003          return;
   1004       }
   1005    }
   1006 
   1007    /* Looks harmless.  Keep going. */
   1008    return;
   1009 }
   1010 
   1011 static void sync_check_gap_callback ( Addr addr, SizeT len )
   1012 {
   1013    Int iLo, iHi, i;
   1014 
   1015    /* If a problem has already been detected, don't continue comparing
   1016       segments, so as to avoid flooding the output with error
   1017       messages. */
   1018 #if !defined(VGO_darwin)
   1019    /* GrP fixme not */
   1020    if (!sync_check_ok)
   1021       return;
   1022 #endif
   1023    if (len == 0)
   1024       return;
   1025 
   1026    /* The kernel should not give us wraparounds. */
   1027    aspacem_assert(addr <= addr + len - 1);
   1028 
   1029    iLo = find_nsegment_idx( addr );
   1030    iHi = find_nsegment_idx( addr + len - 1 );
   1031 
   1032    /* These 5 should be guaranteed by find_nsegment_idx. */
   1033    aspacem_assert(0 <= iLo && iLo < nsegments_used);
   1034    aspacem_assert(0 <= iHi && iHi < nsegments_used);
   1035    aspacem_assert(iLo <= iHi);
   1036    aspacem_assert(nsegments[iLo].start <= addr );
   1037    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
   1038 
   1039    /* NSegments iLo .. iHi inclusive should agree with the presented
   1040       data. */
   1041    for (i = iLo; i <= iHi; i++) {
   1042 
   1043       Bool same;
   1044 
   1045       /* compare the kernel's offering against ours. */
   1046       same = nsegments[i].kind == SkFree
   1047              || nsegments[i].kind == SkResvn;
   1048 
   1049       if (!same) {
   1050          Addr start = addr;
   1051          Addr end = start + len - 1;
   1052          HChar len_buf[20];
   1053          show_len_concisely(len_buf, start, end);
   1054 
   1055          sync_check_ok = False;
   1056 
   1057          VG_(debugLog)(
   1058             0,"aspacem",
   1059               "segment mismatch: V's gap 1st, kernel's 2nd:\n");
   1060          show_nsegment_full( 0, i, &nsegments[i] );
   1061          VG_(debugLog)(0,"aspacem",
   1062             "   : .... %010llx-%010llx %s\n",
   1063             (ULong)start, (ULong)end, len_buf);
   1064          return;
   1065       }
   1066    }
   1067 
   1068    /* Looks harmless.  Keep going. */
   1069    return;
   1070 }
   1071 
   1072 
   1073 /* Sanity check: check that Valgrind and the kernel agree on the
   1074    address space layout.  Prints offending segments and call point if
   1075    a discrepancy is detected, but does not abort the system.  Returned
   1076    Bool is False if a discrepancy was found. */
   1077 
   1078 Bool VG_(am_do_sync_check) ( const HChar* fn,
   1079                              const HChar* file, Int line )
   1080 {
   1081    sync_check_ok = True;
   1082    if (0)
   1083       VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
   1084    parse_procselfmaps( sync_check_mapping_callback,
   1085                        sync_check_gap_callback );
   1086    if (!sync_check_ok) {
   1087       VG_(debugLog)(0,"aspacem",
   1088                       "sync check at %s:%d (%s): FAILED\n",
   1089                       file, line, fn);
   1090       VG_(debugLog)(0,"aspacem", "\n");
   1091 
   1092 #     if 0
   1093       {
   1094          HChar buf[100];
   1095          VG_(am_show_nsegments)(0,"post syncheck failure");
   1096          VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
   1097          VG_(system)(buf);
   1098       }
   1099 #     endif
   1100 
   1101    }
   1102    return sync_check_ok;
   1103 }
   1104 
   1105 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
   1106 void ML_(am_do_sanity_check)( void )
   1107 {
   1108    AM_SANITY_CHECK;
   1109 }
   1110 
   1111 
   1112 /*-----------------------------------------------------------------*/
   1113 /*---                                                           ---*/
   1114 /*--- Low level access / modification of the segment array.     ---*/
   1115 /*---                                                           ---*/
   1116 /*-----------------------------------------------------------------*/
   1117 
   1118 /* Binary search the interval array for a given address.  Since the
   1119    array covers the entire address space the search cannot fail.  The
   1120    _WRK function does the real work.  Its caller (just below) caches
   1121    the results thereof, to save time.  With N_CACHE of 63 we get a hit
   1122    rate exceeding 90% when running OpenOffice.
   1123 
   1124    Re ">> 12", it doesn't matter that the page size of some targets
   1125    might be different from 12.  Really "(a >> 12) % N_CACHE" is merely
   1126    a hash function, and the actual cache entry is always validated
   1127    correctly against the selected cache entry before use.
   1128 */
   1129 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
   1130 __attribute__((noinline))
   1131 static Int find_nsegment_idx_WRK ( Addr a )
   1132 {
   1133    Addr a_mid_lo, a_mid_hi;
   1134    Int  mid,
   1135         lo = 0,
   1136         hi = nsegments_used-1;
   1137    while (True) {
   1138       /* current unsearched space is from lo to hi, inclusive. */
   1139       if (lo > hi) {
   1140          /* Not found.  This can't happen. */
   1141          ML_(am_barf)("find_nsegment_idx: not found");
   1142       }
   1143       mid      = (lo + hi) / 2;
   1144       a_mid_lo = nsegments[mid].start;
   1145       a_mid_hi = nsegments[mid].end;
   1146 
   1147       if (a < a_mid_lo) { hi = mid-1; continue; }
   1148       if (a > a_mid_hi) { lo = mid+1; continue; }
   1149       aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
   1150       aspacem_assert(0 <= mid && mid < nsegments_used);
   1151       return mid;
   1152    }
   1153 }
   1154 
   1155 inline static Int find_nsegment_idx ( Addr a )
   1156 {
   1157 #  define N_CACHE 63
   1158    static Addr cache_pageno[N_CACHE];
   1159    static Int  cache_segidx[N_CACHE];
   1160    static Bool cache_inited = False;
   1161 
   1162    static UWord n_q = 0;
   1163    static UWord n_m = 0;
   1164 
   1165    UWord ix;
   1166 
   1167    if (LIKELY(cache_inited)) {
   1168       /* do nothing */
   1169    } else {
   1170       for (ix = 0; ix < N_CACHE; ix++) {
   1171          cache_pageno[ix] = 0;
   1172          cache_segidx[ix] = -1;
   1173       }
   1174       cache_inited = True;
   1175    }
   1176 
   1177    ix = (a >> 12) % N_CACHE;
   1178 
   1179    n_q++;
   1180    if (0 && 0 == (n_q & 0xFFFF))
   1181       VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
   1182 
   1183    if ((a >> 12) == cache_pageno[ix]
   1184        && cache_segidx[ix] >= 0
   1185        && cache_segidx[ix] < nsegments_used
   1186        && nsegments[cache_segidx[ix]].start <= a
   1187        && a <= nsegments[cache_segidx[ix]].end) {
   1188       /* hit */
   1189       /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
   1190       return cache_segidx[ix];
   1191    }
   1192    /* miss */
   1193    n_m++;
   1194    cache_segidx[ix] = find_nsegment_idx_WRK(a);
   1195    cache_pageno[ix] = a >> 12;
   1196    return cache_segidx[ix];
   1197 #  undef N_CACHE
   1198 }
   1199 
   1200 
   1201 
   1202 /* Finds the segment containing 'a'.  Only returns file/anon/resvn
   1203    segments.  This returns a 'NSegment const *' - a pointer to
   1204    readonly data. */
   1205 NSegment const * VG_(am_find_nsegment) ( Addr a )
   1206 {
   1207    Int i = find_nsegment_idx(a);
   1208    aspacem_assert(i >= 0 && i < nsegments_used);
   1209    aspacem_assert(nsegments[i].start <= a);
   1210    aspacem_assert(a <= nsegments[i].end);
   1211    if (nsegments[i].kind == SkFree)
   1212       return NULL;
   1213    else
   1214       return &nsegments[i];
   1215 }
   1216 
   1217 
   1218 /* Given a pointer to a seg, tries to figure out which one it is in
   1219    nsegments[..].  Very paranoid. */
   1220 static Int segAddr_to_index ( NSegment* seg )
   1221 {
   1222    Int i;
   1223    if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
   1224       return -1;
   1225    i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
   1226    if (i < 0 || i >= nsegments_used)
   1227       return -1;
   1228    if (seg == &nsegments[i])
   1229       return i;
   1230    return -1;
   1231 }
   1232 
   1233 
   1234 /* Find the next segment along from 'here', if it is a file/anon/resvn
   1235    segment. */
   1236 NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
   1237 {
   1238    Int i = segAddr_to_index(here);
   1239    if (i < 0 || i >= nsegments_used)
   1240       return NULL;
   1241    if (fwds) {
   1242       i++;
   1243       if (i >= nsegments_used)
   1244          return NULL;
   1245    } else {
   1246       i--;
   1247       if (i < 0)
   1248          return NULL;
   1249    }
   1250    switch (nsegments[i].kind) {
   1251       case SkFileC: case SkFileV: case SkShmC:
   1252       case SkAnonC: case SkAnonV: case SkResvn:
   1253          return &nsegments[i];
   1254       default:
   1255          break;
   1256    }
   1257    return NULL;
   1258 }
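        /* Illustrative only (not part of aspacem): walking forwards over
           mapped segments starting from the one containing some address 'a',
           using the two lookup functions above.  Both return NULL for SkFree,
           so the walk stops at the first free area.  The cast is needed
           because VG_(am_next_nsegment) takes a non-const pointer.

              NSegment const* seg = VG_(am_find_nsegment)( a );
              while (seg) {
                 .. inspect seg->kind, seg->start, seg->end, etc ..
                 seg = VG_(am_next_nsegment)( (NSegment*)seg, True );
              }
        */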
   1259 
   1260 
   1261 /* Trivial fn: return the total amount of space in anonymous mappings,
   1262    both for V and the client.  Is used for printing stats in
   1263    out-of-memory messages. */
   1264 ULong VG_(am_get_anonsize_total)( void )
   1265 {
   1266    Int   i;
   1267    ULong total = 0;
   1268    for (i = 0; i < nsegments_used; i++) {
   1269       if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
   1270          total += (ULong)nsegments[i].end
   1271                   - (ULong)nsegments[i].start + 1ULL;
   1272       }
   1273    }
   1274    return total;
   1275 }
   1276 
   1277 
   1278 /* Test if a piece of memory is addressable by the client with at
   1279    least the "prot" protection permissions by examining the underlying
   1280    segments.  If freeOk is True then SkFree areas are also allowed.
   1281 */
   1282 static
   1283 Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
   1284 {
   1285    Int  i, iLo, iHi;
   1286    Bool needR, needW, needX;
   1287 
   1288    if (len == 0)
   1289       return True; /* somewhat dubious case */
   1290    if (start + len < start)
   1291       return False; /* reject wraparounds */
   1292 
   1293    needR = toBool(prot & VKI_PROT_READ);
   1294    needW = toBool(prot & VKI_PROT_WRITE);
   1295    needX = toBool(prot & VKI_PROT_EXEC);
   1296 
   1297    iLo = find_nsegment_idx(start);
   1298    aspacem_assert(start >= nsegments[iLo].start);
   1299 
   1300    if (start+len-1 <= nsegments[iLo].end) {
   1301       /* This is a speedup hack which avoids calling find_nsegment_idx
   1302          a second time when possible.  It is always correct to just
   1303          use the "else" clause below, but is_valid_for_client is
   1304          called a lot by the leak checker, so avoiding pointless calls
   1305          to find_nsegment_idx, which can be expensive, is helpful. */
   1306       iHi = iLo;
   1307    } else {
   1308       iHi = find_nsegment_idx(start + len - 1);
   1309    }
   1310 
   1311    for (i = iLo; i <= iHi; i++) {
   1312       if ( (nsegments[i].kind == SkFileC
   1313             || nsegments[i].kind == SkAnonC
   1314             || nsegments[i].kind == SkShmC
   1315             || (nsegments[i].kind == SkFree  && freeOk)
   1316             || (nsegments[i].kind == SkResvn && freeOk))
   1317            && (needR ? nsegments[i].hasR : True)
   1318            && (needW ? nsegments[i].hasW : True)
   1319            && (needX ? nsegments[i].hasX : True) ) {
   1320          /* ok */
   1321       } else {
   1322          return False;
   1323       }
   1324    }
   1325    return True;
   1326 }
   1327 
   1328 /* Test if a piece of memory is addressable by the client with at
   1329    least the "prot" protection permissions by examining the underlying
   1330    segments. */
   1331 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
   1332                                   UInt prot )
   1333 {
   1334    return is_valid_for_client( start, len, prot, False/*free not OK*/ );
   1335 }
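
        /* Illustrative only: a typical use is a syscall wrapper or tool
           asking whether the client may legitimately read some buffer it
           has handed over (buf/size here are whatever memory the caller
           is checking), e.g.

              if (!VG_(am_is_valid_for_client)( buf, size, VKI_PROT_READ ))
                 .. report an addressing error to the user ..
        */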
   1336 
   1337 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
   1338    be consider part of the client's addressable space.  It also
   1339    considers reservations to be allowable, since from the client's
   1340    point of view they don't exist. */
   1341 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
   1342    ( Addr start, SizeT len, UInt prot )
   1343 {
   1344    return is_valid_for_client( start, len, prot, True/*free is OK*/ );
   1345 }
   1346 
   1347 
   1348 /* Test if a piece of memory is addressable by valgrind with at least
   1349    PROT_NONE protection permissions by examining the underlying
   1350    segments. */
   1351 static Bool is_valid_for_valgrind( Addr start, SizeT len )
   1352 {
   1353    Int  i, iLo, iHi;
   1354 
   1355    if (len == 0)
   1356       return True; /* somewhat dubious case */
   1357    if (start + len < start)
   1358       return False; /* reject wraparounds */
   1359 
   1360    iLo = find_nsegment_idx(start);
   1361    iHi = find_nsegment_idx(start + len - 1);
   1362    for (i = iLo; i <= iHi; i++) {
   1363       if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
   1364          /* ok */
   1365       } else {
   1366          return False;
   1367       }
   1368    }
   1369    return True;
   1370 }
   1371 
   1372 
   1373 /* Returns True if any part of the address range is marked as having
   1374    translations made from it.  This is used to determine when to
   1375    discard code, so if in doubt return True. */
   1376 
   1377 static Bool any_Ts_in_range ( Addr start, SizeT len )
   1378 {
   1379    Int iLo, iHi, i;
   1380    aspacem_assert(len > 0);
   1381    aspacem_assert(start + len > start);
   1382    iLo = find_nsegment_idx(start);
   1383    iHi = find_nsegment_idx(start + len - 1);
   1384    for (i = iLo; i <= iHi; i++) {
   1385       if (nsegments[i].hasT)
   1386          return True;
   1387    }
   1388    return False;
   1389 }
   1390 
   1391 
   1392 /*-----------------------------------------------------------------*/
   1393 /*---                                                           ---*/
   1394 /*--- Modifying the segment array, and constructing segments.   ---*/
   1395 /*---                                                           ---*/
   1396 /*-----------------------------------------------------------------*/
   1397 
   1398 /* Split the segment containing 'a' into two, so that 'a' is
   1399    guaranteed to be the start of a new segment.  If 'a' is already the
   1400    start of a segment, do nothing. */
   1401 
   1402 static void split_nsegment_at ( Addr a )
   1403 {
   1404    Int i, j;
   1405 
   1406    aspacem_assert(a > 0);
   1407    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   1408 
   1409    i = find_nsegment_idx(a);
   1410    aspacem_assert(i >= 0 && i < nsegments_used);
   1411 
   1412    if (nsegments[i].start == a)
   1413       /* 'a' is already the start point of a segment, so nothing to be
   1414          done. */
   1415       return;
   1416 
   1417    /* else we have to slide the segments upwards to make a hole */
   1418    if (nsegments_used >= VG_N_SEGMENTS)
   1419       ML_(am_barf_toolow)("VG_N_SEGMENTS");
   1420    for (j = nsegments_used-1; j > i; j--)
   1421       nsegments[j+1] = nsegments[j];
   1422    nsegments_used++;
   1423 
   1424    nsegments[i+1]       = nsegments[i];
   1425    nsegments[i+1].start = a;
   1426    nsegments[i].end     = a-1;
   1427 
   1428    if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
   1429       nsegments[i+1].offset
   1430          += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
   1431 
   1432    aspacem_assert(sane_NSegment(&nsegments[i]));
   1433    aspacem_assert(sane_NSegment(&nsegments[i+1]));
   1434 }
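         /* Worked example (illustrative, assuming 4KB pages): splitting a
            file-backed segment covering [0x8000, 0xBFFF] with offset 0 at
            a == 0xA000 leaves [0x8000, 0x9FFF] (offset 0) and
            [0xA000, 0xBFFF] (offset 0x2000); as computed above, the upper
            half's offset is advanced by the distance between the two start
            addresses. */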
   1435 
   1436 
   1437 /* Do the minimum amount of segment splitting necessary to ensure that
   1438    sLo is the first address denoted by some segment and sHi is the
   1439    highest address denoted by some other segment.  Returns the indices
   1440    of the lowest and highest segments in the range. */
   1441 
   1442 static
   1443 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
   1444                                  /*OUT*/Int* iLo,
   1445                                  /*OUT*/Int* iHi )
   1446 {
   1447    aspacem_assert(sLo < sHi);
   1448    aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
   1449    aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
   1450 
   1451    if (sLo > 0)
   1452       split_nsegment_at(sLo);
   1453    if (sHi < sHi+1)
   1454       split_nsegment_at(sHi+1);
   1455 
   1456    *iLo = find_nsegment_idx(sLo);
   1457    *iHi = find_nsegment_idx(sHi);
   1458    aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
   1459    aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
   1460    aspacem_assert(*iLo <= *iHi);
   1461    aspacem_assert(nsegments[*iLo].start == sLo);
   1462    aspacem_assert(nsegments[*iHi].end == sHi);
   1463    /* Not that I'm overly paranoid or anything, definitely not :-) */
   1464 }
   1465 
   1466 
   1467 /* Add SEG to the collection, deleting/truncating any it overlaps.
   1468    This deals with all the tricky cases of splitting up segments as
   1469    needed. */
   1470 
   1471 static void add_segment ( NSegment* seg )
   1472 {
   1473    Int  i, iLo, iHi, delta;
   1474    Bool segment_is_sane;
   1475 
   1476    Addr sStart = seg->start;
   1477    Addr sEnd   = seg->end;
   1478 
   1479    aspacem_assert(sStart <= sEnd);
   1480    aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
   1481    aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
   1482 
   1483    segment_is_sane = sane_NSegment(seg);
   1484    if (!segment_is_sane) show_nsegment_full(0,-1,seg);
   1485    aspacem_assert(segment_is_sane);
   1486 
   1487    split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
   1488 
   1489    /* Now iLo .. iHi inclusive is the range of segment indices which
   1490       seg will replace.  If we're replacing more than one segment,
   1491       slide those above the range down to fill the hole. */
   1492    delta = iHi - iLo;
   1493    aspacem_assert(delta >= 0);
   1494    if (delta > 0) {
   1495       for (i = iLo; i < nsegments_used-delta; i++)
   1496          nsegments[i] = nsegments[i+delta];
   1497       nsegments_used -= delta;
   1498    }
   1499 
   1500    nsegments[iLo] = *seg;
   1501 
   1502    (void)preen_nsegments();
   1503    if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
   1504 }
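         /* Worked example (illustrative, assuming 4KB pages): if the array
            currently holds Free [0x0000, 0x7FFF] and AnonC [0x8000, 0xFFFF],
            then adding an AnonV segment covering [0x6000, 0x9FFF] first
            splits the existing segments at 0x6000 and 0xA000, slides away
            the two middle pieces, and drops the new segment in, leaving
            Free [0x0000, 0x5FFF], AnonV [0x6000, 0x9FFF] and
            AnonC [0xA000, 0xFFFF] once preening has re-merged anything
            mergeable. */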
   1505 
   1506 
   1507 /* Clear out an NSegment record. */
   1508 
   1509 static void init_nsegment ( /*OUT*/NSegment* seg )
   1510 {
   1511    seg->kind     = SkFree;
   1512    seg->start    = 0;
   1513    seg->end      = 0;
   1514    seg->smode    = SmFixed;
   1515    seg->dev      = 0;
   1516    seg->ino      = 0;
   1517    seg->mode     = 0;
   1518    seg->offset   = 0;
   1519    seg->fnIdx    = -1;
   1520    seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
   1521    seg->mark = False;
   1522 }
   1523 
   1524 /* Make an NSegment which holds a reservation. */
   1525 
   1526 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
   1527 {
   1528    aspacem_assert(start < end);
   1529    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   1530    aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
   1531    init_nsegment(seg);
   1532    seg->kind  = SkResvn;
   1533    seg->start = start;
   1534    seg->end   = end;
   1535 }
   1536 
   1537 
   1538 /*-----------------------------------------------------------------*/
   1539 /*---                                                           ---*/
   1540 /*--- Startup, including reading /proc/self/maps.               ---*/
   1541 /*---                                                           ---*/
   1542 /*-----------------------------------------------------------------*/
   1543 
   1544 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
   1545                                  ULong dev, ULong ino, Off64T offset,
   1546                                  const UChar* filename )
   1547 {
   1548    NSegment seg;
   1549    init_nsegment( &seg );
   1550    seg.start  = addr;
   1551    seg.end    = addr+len-1;
   1552    seg.dev    = dev;
   1553    seg.ino    = ino;
   1554    seg.offset = offset;
   1555    seg.hasR   = toBool(prot & VKI_PROT_READ);
   1556    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   1557    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   1558    seg.hasT   = False;
   1559 
    1560    /* Don't use the presence of a filename in the initial
    1561       /proc/self/maps to decide whether the segment is an AnonV or
    1562       FileV segment, as some systems don't report the filename.  Use
    1563       the device and inode numbers instead.  Fixes bug #124528. */
   1564    seg.kind = SkAnonV;
   1565    if (dev != 0 && ino != 0)
   1566       seg.kind = SkFileV;
   1567 
   1568 #  if defined(VGO_darwin)
   1569    // GrP fixme no dev/ino on darwin
   1570    if (offset != 0)
   1571       seg.kind = SkFileV;
   1572 #  endif // defined(VGO_darwin)
   1573 
   1574 #  if defined(VGP_arm_linux)
   1575    /* The standard handling of entries read from /proc/self/maps will
   1576       cause the faked up commpage segment to have type SkAnonV, which
   1577       is a problem because it contains code we want the client to
   1578       execute, and so later m_translate will segfault the client when
   1579       it tries to go in there.  Hence change the ownership of it here
   1580       to the client (SkAnonC).  The least-worst kludge I could think
   1581       of. */
   1582    if (addr == ARM_LINUX_FAKE_COMMPAGE_START
   1583        && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
   1584        && seg.kind == SkAnonV)
   1585       seg.kind = SkAnonC;
   1586 #  endif // defined(VGP_arm_linux)
   1587 
   1588    if (filename)
   1589       seg.fnIdx = allocate_segname( filename );
   1590 
   1591    if (0) show_nsegment( 2,0, &seg );
   1592    add_segment( &seg );
   1593 }
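         /* Illustrative example (values made up; the line format is the
            kernel's): a /proc/self/maps entry such as

               b7eab000-b7f9f000 r-xp 00000000 08:01 123456  /lib/libc-2.11.so

            reaches this callback as addr 0xb7eab000, len 0xf4000, prot R+X,
            a nonzero dev/ino pair taken from the "08:01 123456" fields,
            offset 0 and the filename string, and therefore becomes an
            SkFileV segment with hasR = hasX = True, hasW = False and fnIdx
            referring to the recorded name. */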
   1594 
   1595 /* Initialise the address space manager, setting up the initial
   1596    segment list, and reading /proc/self/maps into it.  This must
   1597    be called before any other function.
   1598 
   1599    Takes a pointer to the SP at the time V gained control.  This is
   1600    taken to be the highest usable address (more or less).  Based on
   1601    that (and general consultation of tea leaves, etc) return a
   1602    suggested end address for the client's stack. */
   1603 
   1604 Addr VG_(am_startup) ( Addr sp_at_startup )
   1605 {
   1606    NSegment seg;
   1607    Addr     suggested_clstack_top;
   1608 
   1609    aspacem_assert(sizeof(Word)   == sizeof(void*));
   1610    aspacem_assert(sizeof(Addr)   == sizeof(void*));
   1611    aspacem_assert(sizeof(SizeT)  == sizeof(void*));
   1612    aspacem_assert(sizeof(SSizeT) == sizeof(void*));
   1613 
   1614    /* Check that we can store the largest imaginable dev, ino and
   1615       offset numbers in an NSegment. */
   1616    aspacem_assert(sizeof(seg.dev)    == 8);
   1617    aspacem_assert(sizeof(seg.ino)    == 8);
   1618    aspacem_assert(sizeof(seg.offset) == 8);
   1619    aspacem_assert(sizeof(seg.mode)   == 4);
   1620 
   1621    /* Add a single interval covering the entire address space. */
   1622    init_nsegment(&seg);
   1623    seg.kind        = SkFree;
   1624    seg.start       = Addr_MIN;
   1625    seg.end         = Addr_MAX;
   1626    nsegments[0]    = seg;
   1627    nsegments_used  = 1;
   1628 
   1629 #if defined(VGO_darwin)
   1630 
   1631 # if VG_WORDSIZE == 4
   1632    aspacem_minAddr = (Addr) 0x00001000;
   1633    aspacem_maxAddr = (Addr) 0xffffffff;
   1634 
   1635    aspacem_cStart = aspacem_minAddr;
   1636    aspacem_vStart = 0xf0000000;  // 0xc0000000..0xf0000000 available
   1637 # else
   1638    aspacem_minAddr = (Addr) 0x100000000;  // 4GB page zero
   1639    aspacem_maxAddr = (Addr) 0x7fffffffffff;
   1640 
   1641    aspacem_cStart = aspacem_minAddr;
   1642    aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
   1643    // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
   1644 # endif
   1645 
   1646    suggested_clstack_top = -1; // ignored; Mach-O specifies its stack
   1647 
   1648 #else
   1649 
   1650    /* Establish address limits and block out unusable parts
   1651       accordingly. */
   1652 
   1653    VG_(debugLog)(2, "aspacem",
   1654                     "        sp_at_startup = 0x%010llx (supplied)\n",
   1655                     (ULong)sp_at_startup );
   1656 
   1657    aspacem_minAddr = (Addr) 0x04000000; // 64M
   1658 
   1659 #  if VG_WORDSIZE == 8
   1660      aspacem_maxAddr = (Addr)0x8000000000 - 1; // 512G
   1661 #    ifdef ENABLE_INNER
   1662      { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
   1663        if (aspacem_maxAddr > cse)
   1664           aspacem_maxAddr = cse;
   1665      }
   1666 #    endif
   1667 #  else
   1668      aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
   1669 #  endif
   1670 
   1671    aspacem_cStart = aspacem_minAddr; // 64M
   1672    aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
   1673 #  ifdef ENABLE_INNER
   1674    aspacem_vStart -= 0x10000000; // 256M
   1675 #  endif
   1676 
   1677    suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
   1678                                            + VKI_PAGE_SIZE;
   1679 
   1680 #endif
   1681 
   1682    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
   1683    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
   1684    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
   1685    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
   1686    aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
   1687 
   1688    VG_(debugLog)(2, "aspacem",
   1689                     "              minAddr = 0x%010llx (computed)\n",
   1690                     (ULong)aspacem_minAddr);
   1691    VG_(debugLog)(2, "aspacem",
   1692                     "              maxAddr = 0x%010llx (computed)\n",
   1693                     (ULong)aspacem_maxAddr);
   1694    VG_(debugLog)(2, "aspacem",
   1695                     "               cStart = 0x%010llx (computed)\n",
   1696                     (ULong)aspacem_cStart);
   1697    VG_(debugLog)(2, "aspacem",
   1698                     "               vStart = 0x%010llx (computed)\n",
   1699                     (ULong)aspacem_vStart);
   1700    VG_(debugLog)(2, "aspacem",
   1701                     "suggested_clstack_top = 0x%010llx (computed)\n",
   1702                     (ULong)suggested_clstack_top);
   1703 
   1704    if (aspacem_cStart > Addr_MIN) {
   1705       init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
   1706       add_segment(&seg);
   1707    }
   1708    if (aspacem_maxAddr < Addr_MAX) {
   1709       init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
   1710       add_segment(&seg);
   1711    }
   1712 
   1713    /* Create a 1-page reservation at the notional initial
    1714       client/valgrind boundary.  This isn't logically necessary, but
    1715       because the advisor does first-fit and starts searches for
    1716       valgrind allocations at the boundary, it is needed in practice to
    1717       get allocation to start in the right place. */
   1718    init_resvn(&seg, aspacem_vStart,  aspacem_vStart + VKI_PAGE_SIZE - 1);
   1719    add_segment(&seg);
   1720 
   1721    VG_(am_show_nsegments)(2, "Initial layout");
   1722 
   1723    VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
   1724    parse_procselfmaps( read_maps_callback, NULL );
   1725    /* NB: on arm-linux, parse_procselfmaps automagically kludges up
   1726       (iow, hands to its callbacks) a description of the ARM Commpage,
   1727       since that's not listed in /proc/self/maps (kernel bug IMO).  We
   1728       have to fake up its existence in parse_procselfmaps and not
   1729       merely add it here as an extra segment, because doing the latter
   1730       causes sync checking to fail: we see we have an extra segment in
   1731       the segments array, which isn't listed in /proc/self/maps.
   1732       Hence we must make it appear that /proc/self/maps contained this
   1733       segment all along.  Sigh. */
   1734 
   1735    VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
   1736 
   1737    AM_SANITY_CHECK;
   1738    return suggested_clstack_top;
   1739 }
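         /* Worked example (illustrative, 32-bit Linux; the numbers are only
            indicative): with sp_at_startup around 0xBEF00000,
            aspacem_maxAddr becomes VG_PGROUNDDN(sp) - 1 = 0xBEEFFFFF,
            aspacem_minAddr stays at 0x04000000, and aspacem_vStart lands at
            the page-rounded midpoint, (0x04000000 + 0xBEF00000)/2 =
            0x61780000.  The ranges below minAddr and above maxAddr are then
            blocked out with reservations, exactly as done above. */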
   1740 
   1741 
   1742 /*-----------------------------------------------------------------*/
   1743 /*---                                                           ---*/
   1744 /*--- The core query-notify mechanism.                          ---*/
   1745 /*---                                                           ---*/
   1746 /*-----------------------------------------------------------------*/
   1747 
   1748 /* Query aspacem to ask where a mapping should go. */
   1749 
   1750 Addr VG_(am_get_advisory) ( MapRequest*  req,
   1751                             Bool         forClient,
   1752                             /*OUT*/Bool* ok )
   1753 {
   1754    /* This function implements allocation policy.
   1755 
   1756       The nature of the allocation request is determined by req, which
   1757       specifies the start and length of the request and indicates
   1758       whether the start address is mandatory, a hint, or irrelevant,
   1759       and by forClient, which says whether this is for the client or
   1760       for V.
   1761 
   1762       Return values: the request can be vetoed (*ok is set to False),
   1763       in which case the caller should not attempt to proceed with
   1764       making the mapping.  Otherwise, *ok is set to True, the caller
   1765       may proceed, and the preferred address at which the mapping
   1766       should happen is returned.
   1767 
   1768       Note that this is an advisory system only: the kernel can in
   1769       fact do whatever it likes as far as placement goes, and we have
   1770       no absolute control over it.
   1771 
   1772       Allocations will never be granted in a reserved area.
   1773 
   1774       The Default Policy is:
   1775 
   1776         Search the address space for two free intervals: one of them
   1777         big enough to contain the request without regard to the
    1778         specified address (viz, as if it were a floating request) and
    1779         the other being able to contain the request at the specified
    1780         address (viz, as if it were a fixed request).  Then, depending on
   1781         the outcome of the search and the kind of request made, decide
   1782         whether the request is allowable and what address to advise.
   1783 
    1784       The Default Policy is overridden by Policy Exception #1:
   1785 
   1786         If the request is for a fixed client map, we are prepared to
   1787         grant it providing all areas inside the request are either
   1788         free, reservations, or mappings belonging to the client.  In
   1789         other words we are prepared to let the client trash its own
   1790         mappings if it wants to.
   1791 
    1792       The Default Policy is overridden by Policy Exception #2:
   1793 
   1794         If the request is for a hinted client map, we are prepared to
   1795         grant it providing all areas inside the request are either
   1796         free or reservations.  In other words we are prepared to let
   1797         the client have a hinted mapping anywhere it likes provided
   1798         it does not trash either any of its own mappings or any of
   1799         valgrind's mappings.
   1800    */
   1801    Int  i, j;
   1802    Addr holeStart, holeEnd, holeLen;
   1803    Bool fixed_not_required;
   1804 
   1805    Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
   1806 
   1807    Addr reqStart = req->rkind==MAny ? 0 : req->start;
   1808    Addr reqEnd   = reqStart + req->len - 1;
   1809    Addr reqLen   = req->len;
   1810 
   1811    /* These hold indices for segments found during search, or -1 if not
   1812       found. */
   1813    Int floatIdx = -1;
   1814    Int fixedIdx = -1;
   1815 
   1816    aspacem_assert(nsegments_used > 0);
   1817 
   1818    if (0) {
   1819       VG_(am_show_nsegments)(0,"getAdvisory");
   1820       VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
   1821                       (ULong)req->start, (ULong)req->len);
   1822    }
   1823 
   1824    /* Reject zero-length requests */
   1825    if (req->len == 0) {
   1826       *ok = False;
   1827       return 0;
   1828    }
   1829 
   1830    /* Reject wraparounds */
   1831    if ((req->rkind==MFixed || req->rkind==MHint)
   1832        && req->start + req->len < req->start) {
   1833       *ok = False;
   1834       return 0;
   1835    }
   1836 
   1837    /* ------ Implement Policy Exception #1 ------ */
   1838 
   1839    if (forClient && req->rkind == MFixed) {
   1840       Int  iLo   = find_nsegment_idx(reqStart);
   1841       Int  iHi   = find_nsegment_idx(reqEnd);
   1842       Bool allow = True;
   1843       for (i = iLo; i <= iHi; i++) {
   1844          if (nsegments[i].kind == SkFree
   1845              || nsegments[i].kind == SkFileC
   1846              || nsegments[i].kind == SkAnonC
   1847              || nsegments[i].kind == SkShmC
   1848              || nsegments[i].kind == SkResvn) {
   1849             /* ok */
   1850          } else {
   1851             allow = False;
   1852             break;
   1853          }
   1854       }
   1855       if (allow) {
   1856          /* Acceptable.  Granted. */
   1857          *ok = True;
   1858          return reqStart;
   1859       }
   1860       /* Not acceptable.  Fail. */
   1861       *ok = False;
   1862       return 0;
   1863    }
   1864 
   1865    /* ------ Implement Policy Exception #2 ------ */
   1866 
   1867    if (forClient && req->rkind == MHint) {
   1868       Int  iLo   = find_nsegment_idx(reqStart);
   1869       Int  iHi   = find_nsegment_idx(reqEnd);
   1870       Bool allow = True;
   1871       for (i = iLo; i <= iHi; i++) {
   1872          if (nsegments[i].kind == SkFree
   1873              || nsegments[i].kind == SkResvn) {
   1874             /* ok */
   1875          } else {
   1876             allow = False;
   1877             break;
   1878          }
   1879       }
   1880       if (allow) {
   1881          /* Acceptable.  Granted. */
   1882          *ok = True;
   1883          return reqStart;
   1884       }
   1885       /* Not acceptable.  Fall through to the default policy. */
   1886    }
   1887 
   1888    /* ------ Implement the Default Policy ------ */
   1889 
   1890    /* Don't waste time looking for a fixed match if not requested to. */
   1891    fixed_not_required = req->rkind == MAny;
   1892 
   1893    i = find_nsegment_idx(startPoint);
   1894 
   1895    /* Examine holes from index i back round to i-1.  Record the
    1896       index of the first fixed hole and the first floating hole which
    1897       would satisfy the request. */
   1898    for (j = 0; j < nsegments_used; j++) {
   1899 
   1900       if (nsegments[i].kind != SkFree) {
   1901          i++;
   1902          if (i >= nsegments_used) i = 0;
   1903          continue;
   1904       }
   1905 
   1906       holeStart = nsegments[i].start;
   1907       holeEnd   = nsegments[i].end;
   1908 
   1909       /* Stay sane .. */
   1910       aspacem_assert(holeStart <= holeEnd);
   1911       aspacem_assert(aspacem_minAddr <= holeStart);
   1912       aspacem_assert(holeEnd <= aspacem_maxAddr);
   1913 
   1914       /* See if it's any use to us. */
   1915       holeLen = holeEnd - holeStart + 1;
   1916 
   1917       if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
   1918          fixedIdx = i;
   1919 
   1920       if (floatIdx == -1 && holeLen >= reqLen)
   1921          floatIdx = i;
   1922 
   1923       /* Don't waste time searching once we've found what we wanted. */
   1924       if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
   1925          break;
   1926 
   1927       i++;
   1928       if (i >= nsegments_used) i = 0;
   1929    }
   1930 
   1931    aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
   1932    if (fixedIdx >= 0)
   1933       aspacem_assert(nsegments[fixedIdx].kind == SkFree);
   1934 
   1935    aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
   1936    if (floatIdx >= 0)
   1937       aspacem_assert(nsegments[floatIdx].kind == SkFree);
   1938 
   1939    AM_SANITY_CHECK;
   1940 
   1941    /* Now see if we found anything which can satisfy the request. */
   1942    switch (req->rkind) {
   1943       case MFixed:
   1944          if (fixedIdx >= 0) {
   1945             *ok = True;
   1946             return req->start;
   1947          } else {
   1948             *ok = False;
   1949             return 0;
   1950          }
   1951          break;
   1952       case MHint:
   1953          if (fixedIdx >= 0) {
   1954             *ok = True;
   1955             return req->start;
   1956          }
   1957          if (floatIdx >= 0) {
   1958             *ok = True;
   1959             return nsegments[floatIdx].start;
   1960          }
   1961          *ok = False;
   1962          return 0;
   1963       case MAny:
   1964          if (floatIdx >= 0) {
   1965             *ok = True;
   1966             return nsegments[floatIdx].start;
   1967          }
   1968          *ok = False;
   1969          return 0;
   1970       default:
   1971          break;
   1972    }
   1973 
   1974    /*NOTREACHED*/
   1975    ML_(am_barf)("getAdvisory: unknown request kind");
   1976    *ok = False;
   1977    return 0;
   1978 }
   1979 
   1980 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
   1981    fixed requests.  If start is zero, a floating request is issued; if
   1982    nonzero, a fixed request at that address is issued.  Same comments
   1983    about return values apply. */
   1984 
   1985 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
   1986                                           /*OUT*/Bool* ok )
   1987 {
   1988    MapRequest mreq;
   1989    mreq.rkind = start==0 ? MAny : MFixed;
   1990    mreq.start = start;
   1991    mreq.len   = len;
   1992    return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
   1993 }
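         /* Illustrative sketch (not part of the build) of the query-notify
            protocol this wrapper takes part in.  Given a page-aligned len
            and a prot value, a caller placing an anonymous client mapping
            by hand would do roughly:

               Bool ok;
               Addr advised
                  = VG_(am_get_advisory_client_simple)( 0, len, &ok );
               if (ok) {
                  SysRes sres = VG_(am_do_mmap_NO_NOTIFY)(
                                   advised, len, prot,
                                   VKI_MAP_FIXED|VKI_MAP_PRIVATE
                                      |VKI_MAP_ANONYMOUS,
                                   0, 0 );
                  if (!sr_isError(sres))
                     VG_(am_notify_client_mmap)( sr_Res(sres), len, prot,
                                                 VKI_MAP_ANONYMOUS, -1, 0 );
               }

            The helpers further down (VG_(am_mmap_anon_fixed_client) and
            friends) package up essentially this advise/mmap/record
            sequence. */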
   1994 
   1995 
   1996 /* Notifies aspacem that the client completed an mmap successfully.
   1997    The segment array is updated accordingly.  If the returned Bool is
   1998    True, the caller should immediately discard translations from the
   1999    specified address range. */
   2000 
   2001 Bool
   2002 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
   2003                             Int fd, Off64T offset )
   2004 {
   2005    HChar    buf[VKI_PATH_MAX];
   2006    ULong    dev, ino;
   2007    UInt     mode;
   2008    NSegment seg;
   2009    Bool     needDiscard;
   2010 
   2011    aspacem_assert(len > 0);
   2012    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   2013    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2014    aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
   2015 
   2016    /* Discard is needed if any of the just-trashed range had T. */
   2017    needDiscard = any_Ts_in_range( a, len );
   2018 
   2019    init_nsegment( &seg );
   2020    seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
   2021    seg.start  = a;
   2022    seg.end    = a + len - 1;
   2023    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2024    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2025    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2026    if (!(flags & VKI_MAP_ANONYMOUS)) {
   2027       // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
   2028       seg.offset = offset;
   2029       if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2030          seg.dev = dev;
   2031          seg.ino = ino;
   2032          seg.mode = mode;
   2033       }
   2034       if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2035          seg.fnIdx = allocate_segname( buf );
   2036       }
   2037    }
   2038    add_segment( &seg );
   2039    AM_SANITY_CHECK;
   2040    return needDiscard;
   2041 }
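         /* Illustrative note: the returned Bool feeds the translation
            discard protocol.  The mmap syscall wrapper is expected to act
            on it along these lines (the discard call shown is an assumption
            about the caller's environment, not something defined in this
            file):

               if (VG_(am_notify_client_mmap)( a, len, prot, flags,
                                               fd, offset ))
                  VG_(discard_translations)( a, len, "mmap wrapper" );
         */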
   2042 
   2043 /* Notifies aspacem that the client completed a shmat successfully.
   2044    The segment array is updated accordingly.  If the returned Bool is
   2045    True, the caller should immediately discard translations from the
   2046    specified address range. */
   2047 
   2048 Bool
   2049 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
   2050 {
   2051    NSegment seg;
   2052    Bool     needDiscard;
   2053 
   2054    aspacem_assert(len > 0);
   2055    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   2056    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2057 
   2058    /* Discard is needed if any of the just-trashed range had T. */
   2059    needDiscard = any_Ts_in_range( a, len );
   2060 
   2061    init_nsegment( &seg );
   2062    seg.kind   = SkShmC;
   2063    seg.start  = a;
   2064    seg.end    = a + len - 1;
   2065    seg.offset = 0;
   2066    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2067    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2068    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2069    add_segment( &seg );
   2070    AM_SANITY_CHECK;
   2071    return needDiscard;
   2072 }
   2073 
   2074 /* Notifies aspacem that an mprotect was completed successfully.  The
   2075    segment array is updated accordingly.  Note, as with
   2076    VG_(am_notify_munmap), it is not the job of this function to reject
   2077    stupid mprotects, for example the client doing mprotect of
   2078    non-client areas.  Such requests should be intercepted earlier, by
   2079    the syscall wrapper for mprotect.  This function merely records
   2080    whatever it is told.  If the returned Bool is True, the caller
   2081    should immediately discard translations from the specified address
   2082    range. */
   2083 
   2084 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
   2085 {
   2086    Int  i, iLo, iHi;
   2087    Bool newR, newW, newX, needDiscard;
   2088 
   2089    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2090    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2091 
   2092    if (len == 0)
   2093       return False;
   2094 
   2095    newR = toBool(prot & VKI_PROT_READ);
   2096    newW = toBool(prot & VKI_PROT_WRITE);
   2097    newX = toBool(prot & VKI_PROT_EXEC);
   2098 
   2099    /* Discard is needed if we're dumping X permission */
   2100    needDiscard = any_Ts_in_range( start, len ) && !newX;
   2101 
   2102    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
   2103 
   2104    iLo = find_nsegment_idx(start);
   2105    iHi = find_nsegment_idx(start + len - 1);
   2106 
   2107    for (i = iLo; i <= iHi; i++) {
   2108       /* Apply the permissions to all relevant segments. */
   2109       switch (nsegments[i].kind) {
   2110          case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
   2111             nsegments[i].hasR = newR;
   2112             nsegments[i].hasW = newW;
   2113             nsegments[i].hasX = newX;
   2114             aspacem_assert(sane_NSegment(&nsegments[i]));
   2115             break;
   2116          default:
   2117             break;
   2118       }
   2119    }
   2120 
    2121    /* Changing permissions could have made previously unmergeable
   2122       segments mergeable.  Therefore have to re-preen them. */
   2123    (void)preen_nsegments();
   2124    AM_SANITY_CHECK;
   2125    return needDiscard;
   2126 }
   2127 
   2128 
   2129 /* Notifies aspacem that an munmap completed successfully.  The
   2130    segment array is updated accordingly.  As with
    2131    VG_(am_notify_mprotect), we merely record the given info, and don't
   2132    check it for sensibleness.  If the returned Bool is True, the
   2133    caller should immediately discard translations from the specified
   2134    address range. */
   2135 
   2136 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
   2137 {
   2138    NSegment seg;
   2139    Bool     needDiscard;
   2140    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2141    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2142 
   2143    if (len == 0)
   2144       return False;
   2145 
   2146    needDiscard = any_Ts_in_range( start, len );
   2147 
   2148    init_nsegment( &seg );
   2149    seg.start = start;
   2150    seg.end   = start + len - 1;
   2151 
   2152    /* The segment becomes unused (free).  Segments from above
   2153       aspacem_maxAddr were originally SkResvn and so we make them so
   2154       again.  Note, this isn't really right when the segment straddles
   2155       the aspacem_maxAddr boundary - then really it should be split in
   2156       two, the lower part marked as SkFree and the upper part as
   2157       SkResvn.  Ah well. */
   2158    if (start > aspacem_maxAddr
   2159        && /* check previous comparison is meaningful */
   2160           aspacem_maxAddr < Addr_MAX)
   2161       seg.kind = SkResvn;
   2162    else
   2163    /* Ditto for segments from below aspacem_minAddr. */
   2164    if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
   2165       seg.kind = SkResvn;
   2166    else
   2167       seg.kind = SkFree;
   2168 
   2169    add_segment( &seg );
   2170 
   2171    /* Unmapping could create two adjacent free segments, so a preen is
   2172       needed.  add_segment() will do that, so no need to here. */
   2173    AM_SANITY_CHECK;
   2174    return needDiscard;
   2175 }
   2176 
   2177 
   2178 /*-----------------------------------------------------------------*/
   2179 /*---                                                           ---*/
   2180 /*--- Handling mappings which do not arise directly from the    ---*/
   2181 /*--- simulation of the client.                                 ---*/
   2182 /*---                                                           ---*/
   2183 /*-----------------------------------------------------------------*/
   2184 
   2185 /* --- --- --- map, unmap, protect  --- --- --- */
   2186 
   2187 /* Map a file at a fixed address for the client, and update the
   2188    segment array accordingly. */
   2189 
   2190 SysRes VG_(am_mmap_file_fixed_client)
   2191      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
   2192 {
   2193    return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
   2194 }
   2195 
   2196 SysRes VG_(am_mmap_named_file_fixed_client)
   2197      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
   2198 {
   2199    SysRes     sres;
   2200    NSegment   seg;
   2201    Addr       advised;
   2202    Bool       ok;
   2203    MapRequest req;
   2204    ULong      dev, ino;
   2205    UInt       mode;
   2206    HChar      buf[VKI_PATH_MAX];
   2207 
   2208    /* Not allowable. */
   2209    if (length == 0
   2210        || !VG_IS_PAGE_ALIGNED(start)
   2211        || !VG_IS_PAGE_ALIGNED(offset))
   2212       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2213 
   2214    /* Ask for an advisory.  If it's negative, fail immediately. */
   2215    req.rkind = MFixed;
   2216    req.start = start;
   2217    req.len   = length;
   2218    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2219    if (!ok || advised != start)
   2220       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2221 
   2222    /* We have been advised that the mapping is allowable at the
   2223       specified address.  So hand it off to the kernel, and propagate
   2224       any resulting failure immediately. */
   2225    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2226    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2227              start, length, prot,
   2228              VKI_MAP_FIXED|VKI_MAP_PRIVATE,
   2229              fd, offset
   2230           );
   2231    if (sr_isError(sres))
   2232       return sres;
   2233 
   2234    if (sr_Res(sres) != start) {
   2235       /* I don't think this can happen.  It means the kernel made a
   2236          fixed map succeed but not at the requested location.  Try to
   2237          repair the damage, then return saying the mapping failed. */
   2238       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2239       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2240    }
   2241 
   2242    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2243    init_nsegment( &seg );
   2244    seg.kind   = SkFileC;
   2245    seg.start  = start;
   2246    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   2247    seg.offset = offset;
   2248    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2249    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2250    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2251    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2252       seg.dev = dev;
   2253       seg.ino = ino;
   2254       seg.mode = mode;
   2255    }
   2256    if (name) {
   2257       seg.fnIdx = allocate_segname( name );
   2258    } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2259       seg.fnIdx = allocate_segname( buf );
   2260    }
   2261    add_segment( &seg );
   2262 
   2263    AM_SANITY_CHECK;
   2264    return sres;
   2265 }
   2266 
   2267 
   2268 /* Map anonymously at a fixed address for the client, and update
   2269    the segment array accordingly. */
   2270 
   2271 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
   2272 {
   2273    SysRes     sres;
   2274    NSegment   seg;
   2275    Addr       advised;
   2276    Bool       ok;
   2277    MapRequest req;
   2278 
   2279    /* Not allowable. */
   2280    if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
   2281       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2282 
   2283    /* Ask for an advisory.  If it's negative, fail immediately. */
   2284    req.rkind = MFixed;
   2285    req.start = start;
   2286    req.len   = length;
   2287    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2288    if (!ok || advised != start)
   2289       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2290 
   2291    /* We have been advised that the mapping is allowable at the
   2292       specified address.  So hand it off to the kernel, and propagate
   2293       any resulting failure immediately. */
   2294    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2295    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2296              start, length, prot,
   2297              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2298              0, 0
   2299           );
   2300    if (sr_isError(sres))
   2301       return sres;
   2302 
   2303    if (sr_Res(sres) != start) {
   2304       /* I don't think this can happen.  It means the kernel made a
   2305          fixed map succeed but not at the requested location.  Try to
   2306          repair the damage, then return saying the mapping failed. */
   2307       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2308       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2309    }
   2310 
   2311    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2312    init_nsegment( &seg );
   2313    seg.kind  = SkAnonC;
   2314    seg.start = start;
   2315    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2316    seg.hasR  = toBool(prot & VKI_PROT_READ);
   2317    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   2318    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   2319    add_segment( &seg );
   2320 
   2321    AM_SANITY_CHECK;
   2322    return sres;
   2323 }
   2324 
   2325 
   2326 /* Map anonymously at an unconstrained address for the client, and
   2327    update the segment array accordingly.  */
   2328 
   2329 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
   2330 {
   2331    SysRes     sres;
   2332    NSegment   seg;
   2333    Addr       advised;
   2334    Bool       ok;
   2335    MapRequest req;
   2336 
   2337    /* Not allowable. */
   2338    if (length == 0)
   2339       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2340 
   2341    /* Ask for an advisory.  If it's negative, fail immediately. */
   2342    req.rkind = MAny;
   2343    req.start = 0;
   2344    req.len   = length;
   2345    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2346    if (!ok)
   2347       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2348 
   2349    /* We have been advised that the mapping is allowable at the
   2350       advised address.  So hand it off to the kernel, and propagate
   2351       any resulting failure immediately. */
   2352    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2353    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2354              advised, length, prot,
   2355              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2356              0, 0
   2357           );
   2358    if (sr_isError(sres))
   2359       return sres;
   2360 
   2361    if (sr_Res(sres) != advised) {
   2362       /* I don't think this can happen.  It means the kernel made a
   2363          fixed map succeed but not at the requested location.  Try to
   2364          repair the damage, then return saying the mapping failed. */
   2365       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2366       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2367    }
   2368 
   2369    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2370    init_nsegment( &seg );
   2371    seg.kind  = SkAnonC;
   2372    seg.start = advised;
   2373    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2374    seg.hasR  = toBool(prot & VKI_PROT_READ);
   2375    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   2376    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   2377    add_segment( &seg );
   2378 
   2379    AM_SANITY_CHECK;
   2380    return sres;
   2381 }
   2382 
   2383 
   2384 /* Similarly, acquire new address space for the client but with
   2385    considerable restrictions on what can be done with it: (1) the
   2386    actual protections may exceed those stated in 'prot', (2) the
   2387    area's protections cannot be later changed using any form of
   2388    mprotect, and (3) the area cannot be freed using any form of
   2389    munmap.  On Linux this behaves the same as
   2390    VG_(am_mmap_anon_float_client).  On AIX5 this *may* allocate memory
   2391    by using sbrk, so as to make use of large pages on AIX. */
   2392 
   2393 SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
   2394 {
   2395    return VG_(am_mmap_anon_float_client) ( length, prot );
   2396 }
   2397 
   2398 
   2399 /* Map anonymously at an unconstrained address for V, and update the
   2400    segment array accordingly.  This is fundamentally how V allocates
   2401    itself more address space when needed. */
   2402 
   2403 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
   2404 {
   2405    SysRes     sres;
   2406    NSegment   seg;
   2407    Addr       advised;
   2408    Bool       ok;
   2409    MapRequest req;
   2410 
   2411    /* Not allowable. */
   2412    if (length == 0)
   2413       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2414 
   2415    /* Ask for an advisory.  If it's negative, fail immediately. */
   2416    req.rkind = MAny;
   2417    req.start = 0;
   2418    req.len   = length;
   2419    advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
   2420    if (!ok)
   2421       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2422 
   2423 // On Darwin, for anonymous maps you can pass in a tag which is used by
   2424 // programs like vmmap for statistical purposes.
   2425 #ifndef VM_TAG_VALGRIND
   2426 #  define VM_TAG_VALGRIND 0
   2427 #endif
   2428 
   2429    /* We have been advised that the mapping is allowable at the
   2430       specified address.  So hand it off to the kernel, and propagate
   2431       any resulting failure immediately. */
   2432    /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
   2433       another thread can pre-empt our spot.  [At one point on the DARWIN
   2434       branch the VKI_MAP_FIXED was commented out;  unclear if this is
   2435       necessary or not given the second Darwin-only call that immediately
   2436       follows if this one fails.  --njn] */
   2437    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2438              advised, length,
   2439              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
   2440              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2441              VM_TAG_VALGRIND, 0
   2442           );
   2443 #if defined(VGO_darwin)
   2444    if (sr_isError(sres)) {
   2445        /* try again, ignoring the advisory */
   2446        sres = VG_(am_do_mmap_NO_NOTIFY)(
   2447              0, length,
   2448              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
   2449              /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2450              VM_TAG_VALGRIND, 0
   2451           );
   2452    }
   2453 #endif
   2454    if (sr_isError(sres))
   2455       return sres;
   2456 
   2457 #if defined(VGO_linux)
   2458    if (sr_Res(sres) != advised) {
   2459       /* I don't think this can happen.  It means the kernel made a
   2460          fixed map succeed but not at the requested location.  Try to
   2461          repair the damage, then return saying the mapping failed. */
   2462       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2463       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2464    }
   2465 #endif
   2466 
   2467    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2468    init_nsegment( &seg );
   2469    seg.kind  = SkAnonV;
   2470    seg.start = sr_Res(sres);
   2471    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2472    seg.hasR  = True;
   2473    seg.hasW  = True;
   2474    seg.hasX  = True;
   2475    add_segment( &seg );
   2476 
   2477    AM_SANITY_CHECK;
   2478    return sres;
   2479 }
   2480 
   2481 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
   2482 
   2483 void* VG_(am_shadow_alloc)(SizeT size)
   2484 {
   2485    SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
   2486    return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
   2487 }
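         /* Illustrative sketch (not part of the build): a tool asking for
            shadow memory.  The size is hypothetical; the point is that the
            result must be NULL-checked, since the underlying mmap can fail:

               void* shadow = VG_(am_shadow_alloc)( 16 * 1024 * 1024 );
               if (shadow == NULL) {
                  // out of address space: bail out or report the failure
               }
         */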
   2488 
   2489 /* Same comments apply as per VG_(am_sbrk_anon_float_client).  On
   2490    Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
   2491 
   2492 SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
   2493 {
   2494    return VG_(am_mmap_anon_float_valgrind)( cszB );
   2495 }
   2496 
   2497 
   2498 /* Map a file at an unconstrained address for V, and update the
   2499    segment array accordingly.  This is used by V for transiently
   2500    mapping in object files to read their debug info.  */
   2501 
   2502 SysRes VG_(am_mmap_file_float_valgrind_with_flags) ( SizeT length, UInt prot,
   2503                                                      UInt flags,
   2504                                                      Int fd, Off64T offset )
   2505 {
   2506    SysRes     sres;
   2507    NSegment   seg;
   2508    Addr       advised;
   2509    Bool       ok;
   2510    MapRequest req;
   2511    ULong      dev, ino;
   2512    UInt       mode;
   2513    HChar      buf[VKI_PATH_MAX];
   2514 
   2515    /* Not allowable. */
   2516    if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
   2517       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2518 
   2519    /* Ask for an advisory.  If it's negative, fail immediately. */
   2520    req.rkind = MAny;
   2521    req.start = 0;
   2522    req.len   = length;
   2523    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2524    if (!ok)
   2525       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2526 
   2527    /* We have been advised that the mapping is allowable at the
   2528       specified address.  So hand it off to the kernel, and propagate
   2529       any resulting failure immediately. */
   2530    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2531              advised, length, prot,
   2532              flags,
   2533              fd, offset
   2534           );
   2535    if (sr_isError(sres))
   2536       return sres;
   2537 
   2538    if (sr_Res(sres) != advised) {
   2539       /* I don't think this can happen.  It means the kernel made a
   2540          fixed map succeed but not at the requested location.  Try to
   2541          repair the damage, then return saying the mapping failed. */
    2542        /*TODO(kcc): it appears this may actually happen if allocating
   2543         in hugetlbfs. No idea why. */
   2544 //      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2545 //      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2546    }
   2547 
   2548    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2549    init_nsegment( &seg );
   2550    seg.kind   = SkFileV;
   2551    seg.start  = sr_Res(sres);
   2552    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   2553    seg.offset = offset;
   2554    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2555    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2556    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2557    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2558       seg.dev  = dev;
   2559       seg.ino  = ino;
   2560       seg.mode = mode;
   2561    }
   2562    if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2563       seg.fnIdx = allocate_segname( buf );
   2564    }
   2565    add_segment( &seg );
   2566 
   2567    AM_SANITY_CHECK;
   2568    return sres;
   2569 }
   2570 
   2571 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
   2572                                           Int fd, Off64T offset ) {
   2573   return VG_(am_mmap_file_float_valgrind_with_flags) (
   2574       length, prot, VKI_MAP_FIXED|VKI_MAP_PRIVATE, fd, offset);
   2575 }
   2576 
   2577 
   2578 /* --- --- munmap helper --- --- */
   2579 
   2580 static
   2581 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
   2582                             Addr start, SizeT len, Bool forClient )
   2583 {
   2584    Bool   d;
   2585    SysRes sres;
   2586 
   2587    if (!VG_IS_PAGE_ALIGNED(start))
   2588       goto eINVAL;
   2589 
   2590    if (len == 0) {
   2591       *need_discard = False;
   2592       return VG_(mk_SysRes_Success)( 0 );
   2593    }
   2594 
   2595    if (start + len < len)
   2596       goto eINVAL;
   2597 
   2598    len = VG_PGROUNDUP(len);
   2599    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2600    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2601 
   2602    if (forClient) {
   2603       if (!VG_(am_is_valid_for_client_or_free_or_resvn)
   2604             ( start, len, VKI_PROT_NONE ))
   2605          goto eINVAL;
   2606    } else {
   2607       if (!is_valid_for_valgrind( start, len ))
   2608          goto eINVAL;
   2609    }
   2610 
   2611    d = any_Ts_in_range( start, len );
   2612 
   2613    sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
   2614    if (sr_isError(sres))
   2615       return sres;
   2616 
   2617    VG_(am_notify_munmap)( start, len );
   2618    AM_SANITY_CHECK;
   2619    *need_discard = d;
   2620    return sres;
   2621 
   2622   eINVAL:
   2623    return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2624 }
   2625 
   2626 /* Unmap the given address range and update the segment array
   2627    accordingly.  This fails if the range isn't valid for the client.
   2628    If *need_discard is True after a successful return, the caller
   2629    should immediately discard translations from the specified address
   2630    range. */
   2631 
   2632 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
   2633                               Addr start, SizeT len )
   2634 {
   2635    return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
   2636 }
   2637 
   2638 /* Unmap the given address range and update the segment array
   2639    accordingly.  This fails if the range isn't valid for valgrind. */
   2640 
   2641 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
   2642 {
   2643    Bool need_discard;
   2644    SysRes r = am_munmap_both_wrk( &need_discard,
   2645                                   start, len, False/*valgrind*/ );
   2646    /* If this assertion fails, it means we allowed translations to be
   2647       made from a V-owned section.  Which shouldn't happen. */
   2648    if (!sr_isError(r))
   2649       aspacem_assert(!need_discard);
   2650    return r;
   2651 }
   2652 
    2653 /* Let (start,len) denote an area within a single Valgrind-owned
    2654    segment (anon or file).  Change the ownership of [start, start+len)
    2655    to the client instead.  Fails if (start,len) does not denote a
    2656    suitable segment. */
   2657 
   2658 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
   2659 {
   2660    Int i, iLo, iHi;
   2661 
   2662    if (len == 0)
   2663       return True;
   2664    if (start + len < start)
   2665       return False;
   2666    if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
   2667       return False;
   2668 
   2669    i = find_nsegment_idx(start);
   2670    if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
   2671       return False;
   2672    if (start+len-1 > nsegments[i].end)
   2673       return False;
   2674 
   2675    aspacem_assert(start >= nsegments[i].start);
   2676    aspacem_assert(start+len-1 <= nsegments[i].end);
   2677 
   2678    /* This scheme is like how mprotect works: split the to-be-changed
   2679       range into its own segment(s), then mess with them (it).  There
   2680       should be only one. */
   2681    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
   2682    aspacem_assert(iLo == iHi);
   2683    switch (nsegments[iLo].kind) {
   2684       case SkFileV: nsegments[iLo].kind = SkFileC; break;
   2685       case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
   2686       default: aspacem_assert(0); /* can't happen - guarded above */
   2687    }
   2688 
   2689    preen_nsegments();
   2690    return True;
   2691 }
   2692 
   2693 /* 'seg' must be NULL or have been obtained from
   2694    VG_(am_find_nsegment), and still valid.  If non-NULL, and if it
   2695    denotes a SkAnonC (anonymous client mapping) area, set the .isCH
   2696    (is-client-heap) flag for that area.  Otherwise do nothing.
   2697    (Bizarre interface so that the same code works for both Linux and
   2698    AIX and does not impose inefficiencies on the Linux version.) */
   2699 void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
   2700 {
   2701    Int i = segAddr_to_index( seg );
   2702    aspacem_assert(i >= 0 && i < nsegments_used);
   2703    if (nsegments[i].kind == SkAnonC) {
   2704       nsegments[i].isCH = True;
   2705    } else {
   2706       aspacem_assert(nsegments[i].isCH == False);
   2707    }
   2708 }
   2709 
   2710 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
    2711    segment's hasT bit (has-cached-code) if this is a SkFileC or SkAnonC
   2712    segment. */
   2713 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
   2714 {
   2715    Int i = segAddr_to_index( seg );
   2716    aspacem_assert(i >= 0 && i < nsegments_used);
   2717    if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
   2718       nsegments[i].hasT = True;
   2719    }
   2720 }
   2721 
   2722 
   2723 /* --- --- --- reservations --- --- --- */
   2724 
   2725 /* Create a reservation from START .. START+LENGTH-1, with the given
   2726    ShrinkMode.  When checking whether the reservation can be created,
   2727    also ensure that at least abs(EXTRA) extra free bytes will remain
   2728    above (> 0) or below (< 0) the reservation.
   2729 
   2730    The reservation will only be created if it, plus the extra-zone,
   2731    falls entirely within a single free segment.  The returned Bool
   2732    indicates whether the creation succeeded. */
   2733 
   2734 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
   2735                                   ShrinkMode smode, SSizeT extra )
   2736 {
   2737    Int      startI, endI;
   2738    NSegment seg;
   2739 
   2740    /* start and end, not taking into account the extra space. */
   2741    Addr start1 = start;
   2742    Addr end1   = start + length - 1;
   2743 
   2744    /* start and end, taking into account the extra space. */
   2745    Addr start2 = start1;
   2746    Addr end2   = end1;
   2747 
   2748    if (extra < 0) start2 += extra; // this moves it down :-)
   2749    if (extra > 0) end2 += extra;
   2750 
   2751    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2752    aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
   2753    aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
   2754    aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
   2755 
   2756    startI = find_nsegment_idx( start2 );
   2757    endI = find_nsegment_idx( end2 );
   2758 
   2759    /* If the start and end points don't fall within the same (free)
   2760       segment, we're hosed.  This does rely on the assumption that all
   2761       mergeable adjacent segments can be merged, but add_segment()
   2762       should ensure that. */
   2763    if (startI != endI)
   2764       return False;
   2765 
   2766    if (nsegments[startI].kind != SkFree)
   2767       return False;
   2768 
   2769    /* Looks good - make the reservation. */
   2770    aspacem_assert(nsegments[startI].start <= start2);
   2771    aspacem_assert(end2 <= nsegments[startI].end);
   2772 
   2773    init_nsegment( &seg );
   2774    seg.kind  = SkResvn;
   2775    seg.start = start1;  /* NB: extra space is not included in the
   2776                            reservation. */
   2777    seg.end   = end1;
   2778    seg.smode = smode;
   2779    add_segment( &seg );
   2780 
   2781    AM_SANITY_CHECK;
   2782    return True;
   2783 }
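
        /* Illustrative sketch (not compiled): to reserve one page at a
           hypothetical page-aligned address 'resvn_at', whilst requiring
           that at least 16 further free pages remain immediately above it,
           a caller might do

              Bool ok = VG_(am_create_reservation)(
                           resvn_at, VKI_PAGE_SIZE,
                           SmLower, 16 * VKI_PAGE_SIZE );

           A negative 'extra' would instead require the free space to lie
           below the reservation.  The extra space itself stays free; only
           resvn_at .. resvn_at + VKI_PAGE_SIZE - 1 becomes SkResvn. */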
   2784 
   2785 
   2786 /* Let SEG be an anonymous client mapping.  This fn extends the
   2787    mapping by DELTA bytes, taking the space from a reservation section
   2788    which must be adjacent.  If DELTA is positive, the segment is
   2789    extended forwards in the address space, and the reservation must be
   2790    the next one along.  If DELTA is negative, the segment is extended
   2791    backwards in the address space and the reservation must be the
   2792    previous one.  DELTA must be page aligned.  abs(DELTA) must not
   2793    exceed the size of the reservation segment minus one page, that is,
   2794    the reservation segment after the operation must be at least one
   2795    page long. */
   2796 
   2797 Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
   2798                                                        SSizeT    delta )
   2799 {
   2800    Int    segA, segR;
   2801    UInt   prot;
   2802    SysRes sres;
   2803 
   2804    /* Find the segment array index for SEG.  If the assertion fails it
   2805       probably means you passed in a bogus SEG. */
   2806    segA = segAddr_to_index( seg );
   2807    aspacem_assert(segA >= 0 && segA < nsegments_used);
   2808 
   2809    if (nsegments[segA].kind != SkAnonC)
   2810       return False;
   2811 
   2812    if (delta == 0)
   2813       return True;
   2814 
   2815    prot =   (nsegments[segA].hasR ? VKI_PROT_READ : 0)
   2816           | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
   2817           | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
   2818 
   2819    aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
   2820 
   2821    if (delta > 0) {
   2822 
   2823       /* Extending the segment forwards. */
   2824       segR = segA+1;
   2825       if (segR >= nsegments_used
   2826           || nsegments[segR].kind != SkResvn
   2827           || nsegments[segR].smode != SmLower
   2828           || nsegments[segR].start != nsegments[segA].end + 1
   2829           || delta + VKI_PAGE_SIZE
   2830                 > (nsegments[segR].end - nsegments[segR].start + 1))
   2831         return False;
   2832 
   2833       /* Extend the kernel's mapping. */
   2834       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2835       sres = VG_(am_do_mmap_NO_NOTIFY)(
   2836                 nsegments[segR].start, delta,
   2837                 prot,
   2838                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2839                 0, 0
   2840              );
   2841       if (sr_isError(sres))
   2842          return False; /* kernel bug if this happens? */
   2843       if (sr_Res(sres) != nsegments[segR].start) {
   2844          /* kernel bug if this happens? */
   2845         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
   2846         return False;
   2847       }
   2848 
   2849       /* Ok, success with the kernel.  Update our structures. */
   2850       nsegments[segR].start += delta;
   2851       nsegments[segA].end += delta;
   2852       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
   2853 
   2854    } else {
   2855 
   2856       /* Extending the segment backwards. */
   2857       delta = -delta;
   2858       aspacem_assert(delta > 0);
   2859 
   2860       segR = segA-1;
   2861       if (segR < 0
   2862           || nsegments[segR].kind != SkResvn
   2863           || nsegments[segR].smode != SmUpper
   2864           || nsegments[segR].end + 1 != nsegments[segA].start
   2865           || delta + VKI_PAGE_SIZE
   2866                 > (nsegments[segR].end - nsegments[segR].start + 1))
   2867         return False;
   2868 
   2869       /* Extend the kernel's mapping. */
   2870       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2871       sres = VG_(am_do_mmap_NO_NOTIFY)(
   2872                 nsegments[segA].start-delta, delta,
   2873                 prot,
   2874                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2875                 0, 0
   2876              );
   2877       if (sr_isError(sres))
   2878          return False; /* kernel bug if this happens? */
   2879       if (sr_Res(sres) != nsegments[segA].start-delta) {
   2880          /* kernel bug if this happens? */
   2881         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
   2882         return False;
   2883       }
   2884 
   2885       /* Ok, success with the kernel.  Update our structures. */
   2886       nsegments[segR].end -= delta;
   2887       nsegments[segA].start -= delta;
   2888       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
   2889 
   2890    }
   2891 
   2892    AM_SANITY_CHECK;
   2893    return True;
   2894 }
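
        /* Illustrative sketch (not compiled): given a hypothetical client
           heap segment at 'heap_start' whose next neighbour is an SmLower
           reservation, the mapping can be grown by one page like this:

              NSegment const* seg = VG_(am_find_nsegment)( heap_start );
              Bool grew = VG_(am_extend_into_adjacent_reservation_client)
                             ( (NSegment*)seg, VKI_PAGE_SIZE );

           On success the segment's .end and the reservation's .start both
           move up by a page; the reservation always keeps at least one
           page, as described above. */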
   2895 
   2896 
   2897 /* --- --- --- resizing/move a mapping --- --- --- */
   2898 
   2899 #if HAVE_MREMAP
   2900 
   2901 /* Let SEG be a client mapping (anonymous or file).  This fn extends
   2902    the mapping forwards only by DELTA bytes, and trashes whatever was
   2903    in the new area.  Fails if SEG is not a single client mapping or if
   2904    the new area is not accessible to the client.  Fails if DELTA is
   2905    not page aligned.  *seg is invalid after a successful return.  If
   2906    *need_discard is True after a successful return, the caller should
   2907    immediately discard translations from the new area. */
   2908 
   2909 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
   2910                                 NSegment* seg, SizeT delta )
   2911 {
   2912    Addr     xStart;
   2913    SysRes   sres;
   2914    NSegment seg_copy = *seg;
   2915    SizeT    seg_old_len = seg->end + 1 - seg->start;
   2916 
   2917    if (0)
   2918       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
   2919 
   2920    if (seg->kind != SkFileC && seg->kind != SkAnonC)
   2921       return False;
   2922 
   2923    if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
   2924       return False;
   2925 
   2926    xStart = seg->end+1;
   2927    if (xStart + delta < delta)
   2928       return False;
   2929 
   2930    if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
   2931                                                       VKI_PROT_NONE ))
   2932       return False;
   2933 
   2934    AM_SANITY_CHECK;
   2935    sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
   2936                                                seg_old_len,
   2937                                                seg_old_len + delta );
   2938    if (sr_isError(sres)) {
   2939       AM_SANITY_CHECK;
   2940       return False;
   2941    } else {
   2942       /* the area must not have moved */
   2943       aspacem_assert(sr_Res(sres) == seg->start);
   2944    }
   2945 
   2946    *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
   2947 
   2948    seg_copy.end += delta;
   2949    add_segment( &seg_copy );
   2950 
   2951    if (0)
   2952       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
   2953 
   2954    AM_SANITY_CHECK;
   2955    return True;
   2956 }
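
        /* Illustrative sketch (not compiled): a hypothetical caller growing
           a client mapping at 'map_start' by one page, and honouring the
           discard request, might look like

              Bool need_discard = False;
              NSegment const* seg = VG_(am_find_nsegment)( map_start );
              Addr new_area = seg->end + 1;  // where the extension lands
              if (VG_(am_extend_map_client)( &need_discard,
                                             (NSegment*)seg, VKI_PAGE_SIZE )
                  && need_discard) {
                 // caller must now discard translations in the range
                 // new_area .. new_area + VKI_PAGE_SIZE - 1
              }

           Note that anything needed from *seg (here, its old .end) must be
           read before the call, since *seg is invalid afterwards. */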
   2957 
   2958 
   2959 /* Remap the old address range to the new address range.  Fails if any
   2960    parameter is not page aligned, if either size is zero, if any
   2961    wraparound is implied, if the old address range does not fall
   2962    entirely within a single segment, if the new address range overlaps
   2963    with the old one, or if the old address range is not a valid client
   2964    mapping.  If *need_discard is True after a successful return, the
   2965    caller should immediately discard translations from both specified
   2966    address ranges.  */
   2967 
   2968 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
   2969                                         Addr old_addr, SizeT old_len,
   2970                                         Addr new_addr, SizeT new_len )
   2971 {
   2972    Int      iLo, iHi;
   2973    SysRes   sres;
   2974    NSegment seg;
   2975 
   2976    if (old_len == 0 || new_len == 0)
   2977       return False;
   2978 
   2979    if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
   2980        || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
   2981       return False;
   2982 
   2983    if (old_addr + old_len < old_addr
   2984        || new_addr + new_len < new_addr)
   2985       return False;
   2986 
   2987    if (old_addr + old_len - 1 < new_addr
   2988        || new_addr + new_len - 1 < old_addr) {
   2989       /* no overlap */
   2990    } else
   2991       return False;
   2992 
   2993    iLo = find_nsegment_idx( old_addr );
   2994    iHi = find_nsegment_idx( old_addr + old_len - 1 );
   2995    if (iLo != iHi)
   2996       return False;
   2997 
   2998    if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
   2999       return False;
   3000 
   3001    sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
   3002              ( old_addr, old_len, new_addr, new_len );
   3003    if (sr_isError(sres)) {
   3004       AM_SANITY_CHECK;
   3005       return False;
   3006    } else {
   3007       aspacem_assert(sr_Res(sres) == new_addr);
   3008    }
   3009 
   3010    *need_discard = any_Ts_in_range( old_addr, old_len )
   3011                    || any_Ts_in_range( new_addr, new_len );
   3012 
   3013    seg = nsegments[iLo];
   3014 
   3015    /* Mark the new area based on the old seg. */
   3016    if (seg.kind == SkFileC) {
   3017       seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
   3018    } else {
   3019       aspacem_assert(seg.kind == SkAnonC);
   3020       aspacem_assert(seg.offset == 0);
   3021    }
   3022    seg.start = new_addr;
   3023    seg.end   = new_addr + new_len - 1;
   3024    add_segment( &seg );
   3025 
   3026    /* Create a free hole in the old location. */
   3027    init_nsegment( &seg );
   3028    seg.start = old_addr;
   3029    seg.end   = old_addr + old_len - 1;
   3030    /* See comments in VG_(am_notify_munmap) about this SkResvn vs
   3031       SkFree thing. */
   3032    if (old_addr > aspacem_maxAddr
   3033        && /* check previous comparison is meaningful */
   3034           aspacem_maxAddr < Addr_MAX)
   3035       seg.kind = SkResvn;
   3036    else
   3037       seg.kind = SkFree;
   3038 
   3039    add_segment( &seg );
   3040 
   3041    AM_SANITY_CHECK;
   3042    return True;
   3043 }
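
        /* Illustrative sketch (not compiled): for a hypothetical page-aligned
           move of two pages from 'old_a' to a non-overlapping 'new_a', the
           caller would do something like

              Bool need_discard = False;
              Bool ok = VG_(am_relocate_nooverlap_client)
                           ( &need_discard,
                             old_a, 2 * VKI_PAGE_SIZE,
                             new_a, 2 * VKI_PAGE_SIZE );

           and, if ok && need_discard, discard translations from both the
           old and the new ranges, as required above. */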
   3044 
   3045 #endif // HAVE_MREMAP
   3046 
   3047 
   3048 #if defined(VGO_linux)
   3049 
   3050 /*-----------------------------------------------------------------*/
   3051 /*---                                                           ---*/
   3052 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
   3053 /*--- Almost completely independent of the stuff above.  The    ---*/
   3054 /*--- only function it 'exports' to the code above this comment ---*/
   3055 /*--- is parse_procselfmaps.                                    ---*/
   3056 /*---                                                           ---*/
   3057 /*-----------------------------------------------------------------*/
   3058 
   3059 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
   3060 
   3061 /* Size of a smallish table used to read /proc/self/maps entries. */
   3062 #define M_PROCMAP_BUF 100000
   3063 
   3064 /* static ... to keep it out of the stack frame. */
   3065 static Char procmap_buf[M_PROCMAP_BUF];
   3066 
   3067 /* Records length of /proc/self/maps read into procmap_buf. */
   3068 static Int  buf_n_tot;
   3069 
   3070 /* Helper fns. */
   3071 
   3072 static Int hexdigit ( Char c )
   3073 {
   3074    if (c >= '0' && c <= '9') return (Int)(c - '0');
   3075    if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
   3076    if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
   3077    return -1;
   3078 }
   3079 
   3080 static Int decdigit ( Char c )
   3081 {
   3082    if (c >= '0' && c <= '9') return (Int)(c - '0');
   3083    return -1;
   3084 }
   3085 
   3086 static Int readchar ( const Char* buf, Char* ch )
   3087 {
   3088    if (*buf == 0) return 0;
   3089    *ch = *buf;
   3090    return 1;
   3091 }
   3092 
   3093 static Int readhex ( const Char* buf, UWord* val )
   3094 {
   3095    /* Read a word-sized hex number. */
   3096    Int n = 0;
   3097    *val = 0;
   3098    while (hexdigit(*buf) >= 0) {
   3099       *val = (*val << 4) + hexdigit(*buf);
   3100       n++; buf++;
   3101    }
   3102    return n;
   3103 }
   3104 
   3105 static Int readhex64 ( const Char* buf, ULong* val )
   3106 {
   3107    /* Read a potentially 64-bit hex number. */
   3108    Int n = 0;
   3109    *val = 0;
   3110    while (hexdigit(*buf) >= 0) {
   3111       *val = (*val << 4) + hexdigit(*buf);
   3112       n++; buf++;
   3113    }
   3114    return n;
   3115 }
   3116 
   3117 static Int readdec64 ( const Char* buf, ULong* val )
   3118 {
   3119    Int n = 0;
   3120    *val = 0;
   3121    while (decdigit(*buf) >= 0) {
   3122       *val = (*val * 10) + decdigit(*buf);
   3123       n++; buf++;
   3124    }
   3125    return n;
   3126 }
   3127 
   3128 
   3129 /* Get the contents of /proc/self/maps into a static buffer.  If
   3130    there's a syntax error, the contents won't fit, or any other
   3131    failure occurs, just abort. */
   3132 
   3133 static void read_procselfmaps_into_buf ( void )
   3134 {
   3135    Int    n_chunk;
   3136    SysRes fd;
   3137 
   3138    /* Read the initial memory mapping from the /proc filesystem. */
   3139    fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
   3140    if (sr_isError(fd))
   3141       ML_(am_barf)("can't open /proc/self/maps");
   3142 
   3143    buf_n_tot = 0;
   3144    do {
   3145       n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
   3146                               M_PROCMAP_BUF - buf_n_tot );
   3147       if (n_chunk >= 0)
   3148          buf_n_tot += n_chunk;
   3149    } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
   3150 
   3151    ML_(am_close)(sr_Res(fd));
   3152 
   3153    if (buf_n_tot >= M_PROCMAP_BUF-5)
   3154       ML_(am_barf_toolow)("M_PROCMAP_BUF");
   3155    if (buf_n_tot == 0)
   3156       ML_(am_barf)("I/O error on /proc/self/maps");
   3157 
   3158    procmap_buf[buf_n_tot] = 0;
   3159 }
   3160 
   3161 /* Parse /proc/self/maps.  For each map entry, call
   3162    record_mapping, passing it, in this order:
   3163 
   3164       start address in memory
   3165       length
   3166       page protections (using the VKI_PROT_* flags)
   3167       mapped file device and inode
   3168       offset in file, or zero if no file
   3169       filename, zero terminated, or NULL if no file
   3170 
   3171    So the sig of the called fn is
   3172 
   3173       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3174                               ULong dev, ULong ino,
   3175                               Off64T offset, const UChar* filename )
   3176 
   3177    Note that the supplied filename is transiently stored; record_mapping
   3178    should make a copy if it wants to keep it.
   3179 
   3180    Nb: it is important that this function does not alter the contents of
   3181        procmap_buf!
   3182 */
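
        /* Illustrative sketch (not compiled): a minimal record_mapping
           callback matching that signature; all names are hypothetical.

              static void example_record_mapping ( Addr addr, SizeT len,
                                                   UInt prot, ULong dev,
                                                   ULong ino, Off64T offset,
                                                   const UChar* filename )
              {
                 VG_(debugLog)(1, "example", "mapping %p..%p prot=%x %s\n",
                               (void*)addr, (void*)(addr + len), prot,
                               filename ? filename : (const UChar*)"(none)");
              }

           It would be passed to parse_procselfmaps together with a matching
           record_gap callback, either of which may be NULL if that kind of
           notification is not wanted. */
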
   3183 static void parse_procselfmaps (
   3184       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3185                               ULong dev, ULong ino, Off64T offset,
   3186                               const UChar* filename ),
   3187       void (*record_gap)( Addr addr, SizeT len )
   3188    )
   3189 {
   3190    Int    i, j, i_eol;
   3191    Addr   start, endPlusOne, gapStart;
   3192    UChar* filename;
   3193    UChar  rr, ww, xx, pp, ch, tmp;
   3194    UInt   prot;
   3195    UWord  maj, min;
   3196    ULong  foffset, dev, ino;
   3197 
   3198    foffset = ino = 0; /* keep gcc-4.1.0 happy */
   3199 
   3200    read_procselfmaps_into_buf();
   3201 
   3202    aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
   3203 
   3204    if (0)
   3205       VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
   3206 
   3207    /* Ok, it's safely aboard.  Parse the entries. */
   3208    i = 0;
   3209    gapStart = Addr_MIN;
   3210    while (True) {
   3211       if (i >= buf_n_tot) break;
   3212 
   3213       /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
   3214       j = readhex(&procmap_buf[i], &start);
   3215       if (j > 0) i += j; else goto syntaxerror;
   3216       j = readchar(&procmap_buf[i], &ch);
   3217       if (j == 1 && ch == '-') i += j; else goto syntaxerror;
   3218       j = readhex(&procmap_buf[i], &endPlusOne);
   3219       if (j > 0) i += j; else goto syntaxerror;
   3220 
   3221       j = readchar(&procmap_buf[i], &ch);
   3222       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3223 
   3224       j = readchar(&procmap_buf[i], &rr);
   3225       if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
   3226       j = readchar(&procmap_buf[i], &ww);
   3227       if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
   3228       j = readchar(&procmap_buf[i], &xx);
   3229       if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
   3230       /* This field is the shared/private flag */
   3231       j = readchar(&procmap_buf[i], &pp);
   3232       if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
   3233                                               i += j; else goto syntaxerror;
   3234 
   3235       j = readchar(&procmap_buf[i], &ch);
   3236       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3237 
   3238       j = readhex64(&procmap_buf[i], &foffset);
   3239       if (j > 0) i += j; else goto syntaxerror;
   3240 
   3241       j = readchar(&procmap_buf[i], &ch);
   3242       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3243 
   3244       j = readhex(&procmap_buf[i], &maj);
   3245       if (j > 0) i += j; else goto syntaxerror;
   3246       j = readchar(&procmap_buf[i], &ch);
   3247       if (j == 1 && ch == ':') i += j; else goto syntaxerror;
   3248       j = readhex(&procmap_buf[i], &min);
   3249       if (j > 0) i += j; else goto syntaxerror;
   3250 
   3251       j = readchar(&procmap_buf[i], &ch);
   3252       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3253 
   3254       j = readdec64(&procmap_buf[i], &ino);
   3255       if (j > 0) i += j; else goto syntaxerror;
   3256 
   3257       goto read_line_ok;
   3258 
   3259     syntaxerror:
   3260       VG_(debugLog)(0, "Valgrind:",
   3261                        "FATAL: syntax error reading /proc/self/maps\n");
   3262       { Int k, m;
   3263         HChar buf50[51];
   3264         m = 0;
   3265         buf50[m] = 0;
   3266         k = i - 50;
   3267         if (k < 0) k = 0;
   3268         for (; k <= i; k++) {
   3269            buf50[m] = procmap_buf[k];
   3270            buf50[m+1] = 0;
   3271            if (m < 50-1) m++;
   3272         }
   3273         VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
   3274       }
   3275       ML_(am_exit)(1);
   3276 
   3277     read_line_ok:
   3278 
   3279       /* Try and find the name of the file mapped to this segment, if
   3280          it exists.  Note that filenames can contain spaces. */
   3281 
   3282       // Move i to the next non-space char, which should be either a '/' or
   3283       // a newline.
   3284       while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
   3285 
   3286       // Move i_eol to the end of the line.
   3287       i_eol = i;
   3288       while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
   3289 
   3290       // If there's a filename...
   3291       if (i < i_eol-1 && procmap_buf[i] == '/') {
   3292          /* Minor hack: put a '\0' at the filename end for the call to
   3293             'record_mapping', then restore the old char with 'tmp'. */
   3294          filename = &procmap_buf[i];
   3295          tmp = filename[i_eol - i];
   3296          filename[i_eol - i] = '\0';
   3297       } else {
   3298          tmp = 0;
   3299          filename = NULL;
   3300          foffset = 0;
   3301       }
   3302 
   3303       prot = 0;
   3304       if (rr == 'r') prot |= VKI_PROT_READ;
   3305       if (ww == 'w') prot |= VKI_PROT_WRITE;
   3306       if (xx == 'x') prot |= VKI_PROT_EXEC;
   3307 
   3308       /* Linux has two ways to encode a device number when it
   3309          is exposed to user space (via fstat etc). The old way
   3310          is the traditional unix scheme that produces a 16 bit
   3311          device number with the top 8 being the major number and
   3312          the bottom 8 the minor number.
   3313 
   3314          The new scheme allows for a 12 bit major number and
   3315          a 20 bit minor number by using a 32 bit device number
   3316          and putting the top 12 bits of the minor number into
   3317          the top 12 bits of the device number thus leaving an
   3318          extra 4 bits for the major number.
   3319 
   3320          If the minor and major number are both single byte
   3321          values then both schemes give the same result so we
   3322          use the new scheme here in case either number is
   3323          outside the 0-255 range and then use fstat64 when
   3324          available (or fstat on 64 bit systems) so that we
   3325          should always have a new style device number and
   3326          everything should match. */
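              /* A worked example with hypothetical values: maj = 0x103 and
                 min = 0x12345 give
                    dev = 0x45 | (0x103 << 8) | (0x12300 << 12)
                        = 0x45 | 0x10300 | 0x12300000
                        = 0x12310345
                 which is the new-scheme layout described above. */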
   3327       dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
   3328 
   3329       if (record_gap && gapStart < start)
   3330          (*record_gap) ( gapStart, start-gapStart );
   3331 
   3332       if (record_mapping && start < endPlusOne)
   3333          (*record_mapping) ( start, endPlusOne-start,
   3334                              prot, dev, ino,
   3335                              foffset, filename );
   3336 
   3337       if ('\0' != tmp) {
   3338          filename[i_eol - i] = tmp;
   3339       }
   3340 
   3341       i = i_eol + 1;
   3342       gapStart = endPlusOne;
   3343    }
   3344 
   3345 #  if defined(VGP_arm_linux)
   3346    /* ARM puts code at the end of memory that contains processor
   3347       specific stuff (cmpxchg, getting the thread local storage, etc.)
   3348       This isn't specified in /proc/self/maps, so do it here.  This
   3349       kludgery causes the view of memory, as presented to
   3350       record_gap/record_mapping, to actually reflect reality.  IMO
   3351       (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
   3352       the commpage should be regarded as a bug in the kernel. */
   3353    { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
   3354      const Addr commpage_end1  = ARM_LINUX_FAKE_COMMPAGE_END1;
   3355      if (gapStart < commpage_start) {
   3356         if (record_gap)
   3357            (*record_gap)( gapStart, commpage_start - gapStart );
   3358         if (record_mapping)
   3359            (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
   3360                               VKI_PROT_READ|VKI_PROT_EXEC,
   3361                               0/*dev*/, 0/*ino*/, 0/*foffset*/,
   3362                               NULL);
   3363         gapStart = commpage_end1;
   3364      }
   3365    }
   3366 #  endif
   3367 
   3368    if (record_gap && gapStart < Addr_MAX)
   3369       (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
   3370 }
   3371 
   3372 /*------END-procmaps-parser-for-Linux----------------------------*/
   3373 
   3374 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
   3375 
   3376 #elif defined(VGO_darwin)
   3377 #include <mach/mach.h>
   3378 #include <mach/mach_vm.h>
   3379 
   3380 static unsigned int mach2vki(unsigned int vm_prot)
   3381 {
   3382    return
   3383       ((vm_prot & VM_PROT_READ)    ? VKI_PROT_READ    : 0) |
   3384       ((vm_prot & VM_PROT_WRITE)   ? VKI_PROT_WRITE   : 0) |
   3385       ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC    : 0) ;
   3386 }
   3387 
   3388 static UInt stats_machcalls = 0;
   3389 
   3390 static void parse_procselfmaps (
   3391       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3392                               ULong dev, ULong ino, Off64T offset,
   3393                               const UChar* filename ),
   3394       void (*record_gap)( Addr addr, SizeT len )
   3395    )
   3396 {
   3397    vm_address_t iter;
   3398    unsigned int depth;
   3399    vm_address_t last;
   3400 
   3401    iter = 0;
   3402    depth = 0;
   3403    last = 0;
   3404    while (1) {
   3405       mach_vm_address_t addr = iter;
   3406       mach_vm_size_t size;
   3407       vm_region_submap_short_info_data_64_t info;
   3408       kern_return_t kr;
   3409 
   3410       while (1) {
   3411          mach_msg_type_number_t info_count
   3412             = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
   3413          stats_machcalls++;
   3414          kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
   3415                                      (vm_region_info_t)&info, &info_count);
   3416          if (kr)
   3417             return;
   3418          if (info.is_submap) {
   3419             depth++;
   3420             continue;
   3421          }
   3422          break;
   3423       }
   3424       iter = addr + size;
   3425 
   3426       if (addr > last  &&  record_gap) {
   3427          (*record_gap)(last, addr - last);
   3428       }
   3429       if (record_mapping) {
   3430          (*record_mapping)(addr, size, mach2vki(info.protection),
   3431                            0, 0, info.offset, NULL);
   3432       }
   3433       last = addr + size;
   3434    }
   3435 
   3436    if ((Addr)-1 > last  &&  record_gap)
   3437       (*record_gap)(last, (Addr)-1 - last);
   3438 }
   3439 
   3440 // Urr.  So much for thread safety.
   3441 static Bool        css_overflowed;
   3442 static ChangedSeg* css_local;
   3443 static Int         css_size_local;
   3444 static Int         css_used_local;
   3445 
   3446 static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
   3447                                  ULong dev, ULong ino, Off64T offset,
   3448                                  const UChar *filename)
   3449 {
   3450    // derived from sync_check_mapping_callback()
   3451 
   3452    Int iLo, iHi, i;
   3453 
   3454    if (len == 0) return;
   3455 
   3456    /* The kernel should not give us wraparounds. */
   3457    aspacem_assert(addr <= addr + len - 1);
   3458 
   3459    iLo = find_nsegment_idx( addr );
   3460    iHi = find_nsegment_idx( addr + len - 1 );
   3461 
   3462 
   3463    /* NSegments iLo .. iHi inclusive should agree with the presented
   3464       data. */
   3465    for (i = iLo; i <= iHi; i++) {
   3466 
   3467       UInt seg_prot;
   3468 
   3469       if (nsegments[i].kind == SkAnonV  ||  nsegments[i].kind == SkFileV) {
   3470          /* Ignore V regions */
   3471          continue;
   3472       }
   3473       else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
   3474          /* Add mappings for SkFree and SkResvn regions */
   3475          ChangedSeg* cs = &css_local[css_used_local];
   3476          if (css_used_local < css_size_local) {
   3477             cs->is_added = True;
   3478             cs->start    = addr;
   3479             cs->end      = addr + len - 1;
   3480             cs->prot     = prot;
   3481             cs->offset   = offset;
   3482             css_used_local++;
   3483          } else {
   3484             css_overflowed = True;
   3485          }
   3486          return;
   3487 
   3488       } else if (nsegments[i].kind == SkAnonC ||
   3489                  nsegments[i].kind == SkFileC ||
   3490                  nsegments[i].kind == SkShmC)
   3491       {
   3492          /* Check permissions on client regions */
   3493          // GrP fixme
   3494          seg_prot = 0;
   3495          if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
   3496          if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
   3497 #        if defined(VGA_x86)
   3498          // GrP fixme sloppyXcheck
   3499          // darwin: kernel X ignored and spuriously changes? (vm_copy)
   3500          seg_prot |= (prot & VKI_PROT_EXEC);
   3501 #        else
   3502          if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
   3503 #        endif
   3504          if (seg_prot != prot) {
   3505              if (VG_(clo_trace_syscalls))
   3506                  VG_(debugLog)(0,"aspacem","region %p..%p permission "
   3507                                  "mismatch (kernel %x, V %x)\n",
   3508                                  (void*)nsegments[i].start,
   3509                                  (void*)(nsegments[i].end+1), prot, seg_prot);
   3510          }
   3511 
   3512       } else {
   3513          aspacem_assert(0);
   3514       }
   3515    }
   3516 }
   3517 
   3518 static void remove_mapping_callback(Addr addr, SizeT len)
   3519 {
   3520    // derived from sync_check_gap_callback()
   3521 
   3522    Int iLo, iHi, i;
   3523 
   3524    if (len == 0)
   3525       return;
   3526 
   3527    /* The kernel should not give us wraparounds. */
   3528    aspacem_assert(addr <= addr + len - 1);
   3529 
   3530    iLo = find_nsegment_idx( addr );
   3531    iHi = find_nsegment_idx( addr + len - 1 );
   3532 
   3533    /* NSegments iLo .. iHi inclusive should agree with the presented data. */
   3534    for (i = iLo; i <= iHi; i++) {
   3535       if (nsegments[i].kind != SkFree  &&  nsegments[i].kind != SkResvn) {
   3536          // V has a mapping, kernel doesn't
   3537          ChangedSeg* cs = &css_local[css_used_local];
   3538          if (css_used_local < css_size_local) {
   3539             cs->is_added = False;
   3540             cs->start    = nsegments[i].start;
   3541             cs->end      = nsegments[i].end;
   3542             cs->prot     = 0;
   3543             cs->offset   = 0;
   3544             css_used_local++;
   3545          } else {
   3546             css_overflowed = True;
   3547          }
   3548          return;
   3549       }
   3550    }
   3551 }
   3552 
   3553 
   3554 // Returns False if 'css' wasn't big enough.
   3555 Bool VG_(get_changed_segments)(
   3556       const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
   3557       Int css_size, /*OUT*/Int* css_used)
   3558 {
   3559    static UInt stats_synccalls = 1;
   3560    aspacem_assert(when && where);
   3561 
   3562    if (0)
   3563       VG_(debugLog)(0,"aspacem",
   3564          "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
   3565          stats_synccalls++, stats_machcalls, when, where
   3566       );
   3567 
   3568    css_overflowed = False;
   3569    css_local = css;
   3570    css_size_local = css_size;
   3571    css_used_local = 0;
   3572 
   3573    // Get the list of segs that need to be added/removed.
   3574    parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
   3575 
   3576    *css_used = css_used_local;
   3577 
   3578    if (css_overflowed) {
   3579       aspacem_assert(css_used_local == css_size_local);
   3580    }
   3581 
   3582    return !css_overflowed;
   3583 }
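
        /* Illustrative sketch (not compiled): a hypothetical caller would
           use it roughly like

              ChangedSeg css[100];
              Int        n_css = 0;
              if (!VG_(get_changed_segments)("after", "some-syscall",
                                             css, 100, &n_css)) {
                 // css[] was too small; retry with a bigger array
              }
              // otherwise apply css[0 .. n_css-1] to bring the segment
              // array back into sync with the kernel's view

           Note it only reports differences; it does not modify the segment
           array itself. */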
   3584 
   3585 #endif // defined(VGO_darwin)
   3586 
   3587 /*------END-procmaps-parser-for-Darwin---------------------------*/
   3588 
   3589 #endif // defined(VGO_linux) || defined(VGO_darwin)
   3590 
   3591 /*--------------------------------------------------------------------*/
   3592 /*--- end                                                          ---*/
   3593 /*--------------------------------------------------------------------*/
   3594