      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- The address space manager: segment initialisation and        ---*/
      4 /*--- tracking, stack operations                                   ---*/
      5 /*---                                                              ---*/
      6 /*--- Implementation for Linux (and Darwin!)   m_aspacemgr-linux.c ---*/
      7 /*--------------------------------------------------------------------*/
      8 
      9 /*
     10    This file is part of Valgrind, a dynamic binary instrumentation
     11    framework.
     12 
     13    Copyright (C) 2000-2011 Julian Seward
     14       jseward (at) acm.org
     15 
     16    This program is free software; you can redistribute it and/or
     17    modify it under the terms of the GNU General Public License as
     18    published by the Free Software Foundation; either version 2 of the
     19    License, or (at your option) any later version.
     20 
     21    This program is distributed in the hope that it will be useful, but
     22    WITHOUT ANY WARRANTY; without even the implied warranty of
     23    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     24    General Public License for more details.
     25 
     26    You should have received a copy of the GNU General Public License
     27    along with this program; if not, write to the Free Software
     28    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     29    02111-1307, USA.
     30 
     31    The GNU General Public License is contained in the file COPYING.
     32 */
     33 
     34 #if defined(VGO_linux) || defined(VGO_darwin)
     35 
     36 /* *************************************************************
     37    DO NOT INCLUDE ANY OTHER FILES HERE.
     38    ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
     39    AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
     40    ************************************************************* */
     41 
     42 #include "priv_aspacemgr.h"
     43 #include "config.h"
     44 
     45 
     46 /* Note: many of the exported functions implemented below are
     47    described more fully in comments in pub_core_aspacemgr.h.
     48 */
     49 
     50 
     51 /*-----------------------------------------------------------------*/
     52 /*---                                                           ---*/
     53 /*--- Overview.                                                 ---*/
     54 /*---                                                           ---*/
     55 /*-----------------------------------------------------------------*/
     56 
     57 /* Purpose
     58    ~~~~~~~
     59    The purpose of the address space manager (aspacem) is:
     60 
     61    (1) to record the disposition of all parts of the process' address
     62        space at all times.
     63 
     64    (2) to the extent that it can, influence layout in ways favourable
     65        to our purposes.
     66 
     67    It is important to appreciate that whilst it can and does attempt
     68    to influence layout, and usually succeeds, it isn't possible to
     69    impose absolute control: in the end, the kernel is the final
     70    arbiter, and can always bounce our requests.
     71 
     72    Strategy
     73    ~~~~~~~~
     74    The strategy is therefore as follows:
     75 
     76    * Track ownership of mappings.  Each one can belong either to
     77      Valgrind or to the client.
     78 
     79    * Try to place the client's fixed and hinted mappings at the
     80      requested addresses.  Fixed mappings are allowed anywhere except
     81      in areas reserved by Valgrind; the client can trash its own
     82      mappings if it wants.  Hinted mappings are allowed providing they
     83      fall entirely in free areas; if not, they will be placed by
     84      aspacem in a free area.
     85 
     86    * Anonymous mappings are allocated so as to keep Valgrind and
     87      client areas widely separated when possible.  If address space
     88      runs low, then they may become intermingled: aspacem will attempt
     89      to use all possible space.  But under most circumstances lack of
     90      address space is not a problem and so the areas will remain far
     91      apart.
     92 
     93      Searches for client space start at aspacem_cStart and will wrap
     94      around the end of the available space if needed.  Searches for
     95      Valgrind space start at aspacem_vStart and will also wrap around.
     96      Because aspacem_cStart is approximately at the start of the
     97      available space and aspacem_vStart is approximately in the
     98      middle, for the most part the client anonymous mappings will be
     99      clustered towards the start of available space, and Valgrind ones
    100      in the middle.
    101 
    102      The available space is delimited by aspacem_minAddr and
    103      aspacem_maxAddr.  aspacem is flexible and can operate with these
    104      at any (sane) setting.  For 32-bit Linux, aspacem_minAddr is set
    105      to some low-ish value at startup (64M) and aspacem_maxAddr is
    106      derived from the stack pointer at system startup.  This seems a
    107      reliable way to establish the initial boundaries.
    108 
    109      64-bit Linux is similar except for the important detail that the
    110      upper boundary is set to 32G.  The reason is so that all
    111      anonymous mappings (basically all client data areas) are kept
     112      below 32G, since that is the maximum range over which memcheck
     113      can track shadow memory using a fast 2-level sparse array.  It
     114      can go beyond that but runs much more slowly.  The 32G limit is
    115      arbitrary and is trivially changed.  So, with the current
    116      settings, programs on 64-bit Linux will appear to run out of
    117      address space and presumably fail at the 32G limit.  Given the
    118      9/8 space overhead of Memcheck, that means you should be able to
    119      memcheckify programs that use up to about 14G natively.
    120 
    121    Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
    122    anonymous mappings.  The client can still do fixed and hinted maps
    123    at any addresses provided they do not overlap Valgrind's segments.
    124    This makes Valgrind able to load prelinked .so's at their requested
    125    addresses on 64-bit platforms, even if they are very high (eg,
    126    112TB).
    127 
    128    At startup, aspacem establishes the usable limits, and advises
    129    m_main to place the client stack at the top of the range, which on
    130    a 32-bit machine will be just below the real initial stack.  One
    131    effect of this is that self-hosting sort-of works, because an inner
    132    valgrind will then place its client's stack just below its own
    133    initial stack.
    134 
    135    The segment array and segment kinds
    136    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    137    The central data structure is the segment array (segments[0
    138    .. nsegments_used-1]).  This covers the entire address space in
    139    order, giving account of every byte of it.  Free spaces are
    140    represented explicitly as this makes many operations simpler.
    141    Mergeable adjacent segments are aggressively merged so as to create
    142    a "normalised" representation (preen_nsegments).
    143 
    144    There are 7 (mutually-exclusive) segment kinds, the meaning of
    145    which is important:
    146 
    147    SkFree: a free space, which may be allocated either to Valgrind (V)
    148       or the client (C).
    149 
    150    SkAnonC: an anonymous mapping belonging to C.  For these, aspacem
     151       tracks a boolean indicating whether or not it is part of the
    152       client's heap area (can't remember why).
    153 
    154    SkFileC: a file mapping belonging to C.
    155 
    156    SkShmC: a shared memory segment belonging to C.
    157 
    158    SkAnonV: an anonymous mapping belonging to V.  These cover all V's
    159       dynamic memory needs, including non-client malloc/free areas,
    160       shadow memory, and the translation cache.
    161 
    162    SkFileV: a file mapping belonging to V.  As far as I know these are
    163       only created transiently for the purposes of reading debug info.
    164 
    165    SkResvn: a reservation segment.
    166 
    167    These are mostly straightforward.  Reservation segments have some
    168    subtlety, however.
    169 
    170    A reservation segment is unmapped from the kernel's point of view,
    171    but is an area in which aspacem will not create anonymous maps
    172    (either Vs or Cs).  The idea is that we will try to keep it clear
    173    when the choice to do so is ours.  Reservation segments are
    174    'invisible' from the client's point of view: it may choose to park
    175    a fixed mapping in the middle of one, and that's just tough -- we
    176    can't do anything about that.  From the client's perspective
    177    reservations are semantically equivalent to (although
    178    distinguishable from, if it makes enquiries) free areas.
    179 
    180    Reservations are a primitive mechanism provided for whatever
    181    purposes the rest of the system wants.  Currently they are used to
    182    reserve the expansion space into which a growdown stack is
    183    expanded, and into which the data segment is extended.  Note,
    184    though, those uses are entirely external to this module, which only
    185    supplies the primitives.
    186 
    187    Reservations may be shrunk in order that an adjoining anonymous
    188    mapping may be extended.  This makes dataseg/stack expansion work.
    189    A reservation may not be shrunk below one page.
    190 
    191    The advise/notify concept
    192    ~~~~~~~~~~~~~~~~~~~~~~~~~
    193    All mmap-related calls must be routed via aspacem.  Calling
    194    sys_mmap directly from the rest of the system is very dangerous
    195    because aspacem's data structures will become out of date.
    196 
    197    The fundamental mode of operation of aspacem is to support client
    198    mmaps.  Here's what happens (in ML_(generic_PRE_sys_mmap)):
    199 
    200    * m_syswrap intercepts the mmap call.  It examines the parameters
    201      and identifies the requested placement constraints.  There are
    202      three possibilities: no constraint (MAny), hinted (MHint, "I
    203      prefer X but will accept anything"), and fixed (MFixed, "X or
    204      nothing").
    205 
    206    * This request is passed to VG_(am_get_advisory).  This decides on
    207      a placement as described in detail in Strategy above.  It may
    208      also indicate that the map should fail, because it would trash
    209      one of Valgrind's areas, which would probably kill the system.
    210 
    211    * Control returns to the wrapper.  If VG_(am_get_advisory) has
    212      declared that the map should fail, then it must be made to do so.
    213      Usually, though, the request is considered acceptable, in which
    214      case an "advised" address is supplied.  The advised address
    215      replaces the original address supplied by the client, and
    216      MAP_FIXED is set.
    217 
    218      Note at this point that although aspacem has been asked for
    219      advice on where to place the mapping, no commitment has yet been
    220      made by either it or the kernel.
    221 
    222    * The adjusted request is handed off to the kernel.
    223 
    224    * The kernel's result is examined.  If the map succeeded, aspacem
    225      is told of the outcome (VG_(am_notify_client_mmap)), so it can
    226      update its records accordingly.
    227 
    228   This then is the central advise-notify idiom for handling client
    229   mmap/munmap/mprotect/shmat:
    230 
    231   * ask aspacem for an advised placement (or a veto)
    232 
    233   * if not vetoed, hand request to kernel, using the advised placement
    234 
    235   * examine result, and if successful, notify aspacem of the result.
    236 
    237   There are also many convenience functions, eg
    238   VG_(am_mmap_anon_fixed_client), which do both phases entirely within
    239   aspacem.
    240 
    241   To debug all this, a sync-checker is provided.  It reads
    242   /proc/self/maps, compares what it sees with aspacem's records, and
    243   complains if there is a difference.  --sanity-level=3 runs it before
     244   and after each syscall, which is a powerful, if slow, way of finding
    245   buggy syscall wrappers.
    246 
    247   Loss of pointercheck
    248   ~~~~~~~~~~~~~~~~~~~~
    249   Up to and including Valgrind 2.4.1, x86 segmentation was used to
     250   enforce separation of V and C, so that wild writes by C could not
    251   trash V.  This got called "pointercheck".  Unfortunately, the new
    252   more flexible memory layout, plus the need to be portable across
    253   different architectures, means doing this in hardware is no longer
    254   viable, and doing it in software is expensive.  So at the moment we
    255   don't do it at all.
    256 */
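
/* Illustrative sketch (deliberately not compiled, hence the "#if 0")
   of the advise/notify idiom described above, roughly as a syscall
   wrapper would use it for a client mmap.  The wrapper name is made
   up, and the prototypes assumed for VG_(am_get_advisory),
   ML_(am_do_mmap_NO_NOTIFY) and VG_(am_notify_client_mmap) are
   approximations of those declared in pub_core_aspacemgr.h and
   priv_aspacemgr.h. */
#if 0
static SysRes example_wrap_client_mmap ( Addr start, SizeT length,
                                         UInt prot, UInt flags,
                                         Int fd, Off64T offset )
{
   MapRequest mreq;
   Bool       ok;
   Addr       advised;
   SysRes     sres;

   /* Phase 1: ask aspacem for an advised placement, or a veto. */
   mreq.rkind = (flags & VKI_MAP_FIXED) ? MFixed
                : (start != 0)          ? MHint
                :                         MAny;
   mreq.start = start;
   mreq.len   = length;
   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &ok );
   if (!ok)
      /* vetoed: the mapping would trash one of Valgrind's areas */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Phase 2: hand the request to the kernel, at the advised address
      and with MAP_FIXED forced. */
   sres = ML_(am_do_mmap_NO_NOTIFY)( advised, length, prot,
                                     flags | VKI_MAP_FIXED, fd, offset );

   /* Phase 3: if the kernel accepted it, notify aspacem so that its
      records stay in sync. */
   if (!sr_isError(sres))
      (void) VG_(am_notify_client_mmap)( sr_Res(sres), length, prot,
                                         flags, fd, offset );
   return sres;
}
#endif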
    257 
    258 
    259 /*-----------------------------------------------------------------*/
    260 /*---                                                           ---*/
    261 /*--- The Address Space Manager's state.                        ---*/
    262 /*---                                                           ---*/
    263 /*-----------------------------------------------------------------*/
    264 
    265 /* ------ start of STATE for the address-space manager ------ */
    266 
    267 /* Max number of segments we can track. */
     268 /* glider: We keep VG_N_SEGMENTS low on Android, because the segment
     269    array would otherwise occupy too much memory.  We used to have
     270    VG_N_SEGMENTS=10000 on Darwin, but that turned out to be too low for Chromium.
    271 */
    272 #if defined(VGO_darwin)
    273 #define VG_N_SEGMENTS 50000
    274 #elif defined(ANDROID)
    275 #define VG_N_SEGMENTS 10000
    276 #else
    277 #define VG_N_SEGMENTS 100000
    278 #endif
    279 
    280 /* Max number of segment file names we can track. */
    281 #if defined(VGO_darwin) || defined(ANDROID)
    282 #define VG_N_SEGNAMES 1000
    283 #else
    284 #define VG_N_SEGNAMES 100000
    285 #endif
    286 
    287 /* Max length of a segment file name. */
    288 #define VG_MAX_SEGNAMELEN 1000
    289 
    290 
    291 typedef
    292    struct {
    293       Bool  inUse;
    294       Bool  mark;
    295       HChar fname[VG_MAX_SEGNAMELEN];
    296    }
    297    SegName;
    298 
    299 /* Filename table.  _used is the high water mark; an entry is only
    300    valid if its index >= 0, < _used, and its .inUse field == True.
    301    The .mark field is used to garbage-collect dead entries.
    302 */
    303 static SegName segnames[VG_N_SEGNAMES];
    304 static Int     segnames_used = 0;
    305 
    306 
    307 /* Array [0 .. nsegments_used-1] of all mappings. */
    308 /* Sorted by .addr field. */
    309 /* I: len may not be zero. */
    310 /* I: overlapping segments are not allowed. */
    311 /* I: the segments cover the entire address space precisely. */
    312 /* Each segment can optionally hold an index into the filename table. */
    313 
    314 static NSegment nsegments[VG_N_SEGMENTS];
    315 static Int      nsegments_used = 0;
    316 
    317 #define Addr_MIN ((Addr)0)
    318 #define Addr_MAX ((Addr)(-1ULL))
    319 
    320 /* Limits etc */
    321 
    322 // The smallest address that aspacem will try to allocate
    323 static Addr aspacem_minAddr = 0;
    324 
    325 // The largest address that aspacem will try to allocate
    326 static Addr aspacem_maxAddr = 0;
    327 
    328 // Where aspacem will start looking for client space
    329 static Addr aspacem_cStart = 0;
    330 
    331 // Where aspacem will start looking for Valgrind space
    332 static Addr aspacem_vStart = 0;
    333 
    334 
    335 #define AM_SANITY_CHECK                                      \
    336    do {                                                      \
     337       if (VG_(clo_sanity_level) >= 3)                       \
    338          aspacem_assert(VG_(am_do_sync_check)                \
    339             (__PRETTY_FUNCTION__,__FILE__,__LINE__));        \
    340    } while (0)
    341 
    342 /* ------ end of STATE for the address-space manager ------ */
    343 
    344 /* ------ Forwards decls ------ */
    345 inline
    346 static Int  find_nsegment_idx ( Addr a );
    347 
    348 static void parse_procselfmaps (
    349       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
    350                               ULong dev, ULong ino, Off64T offset,
    351                               const UChar* filename ),
    352       void (*record_gap)( Addr addr, SizeT len )
    353    );
    354 
    355 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
    356 /* Not that I have anything against the commpage per se.  It's just
    357    that it's not listed in /proc/self/maps, which is a royal PITA --
    358    we have to fake it up, in parse_procselfmaps.
    359 
    360    But note also bug 254556 comment #2: this is now fixed in newer
    361    kernels -- it is listed as a "[vectors]" entry.  Presumably the
    362    fake entry made here duplicates the [vectors] entry, and so, if at
    363    some point in the future, we can stop supporting buggy kernels,
    364    then this kludge can be removed entirely, since the procmap parser
    365    below will read that entry in the normal way. */
    366 #if defined(VGP_arm_linux)
    367 #  define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
    368 #  define ARM_LINUX_FAKE_COMMPAGE_END1  0xFFFF1000
    369 #endif
    370 
    371 
    372 /*-----------------------------------------------------------------*/
    373 /*---                                                           ---*/
    374 /*--- SegName array management.                                 ---*/
    375 /*---                                                           ---*/
    376 /*-----------------------------------------------------------------*/
    377 
    378 /* Searches the filename table to find an index for the given name.
    379    If none is found, an index is allocated and the name stored.  If no
    380    space is available we just give up.  If the string is too long to
    381    store, return -1.
    382 */
    383 static Int allocate_segname ( const HChar* name )
    384 {
    385    Int i, j, len;
    386 
    387    aspacem_assert(name);
    388 
    389    if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
    390 
    391    len = VG_(strlen)(name);
    392    if (len >= VG_MAX_SEGNAMELEN-1) {
    393       return -1;
    394    }
    395 
    396    /* first see if we already have the name. */
    397    for (i = 0; i < segnames_used; i++) {
    398       if (!segnames[i].inUse)
    399          continue;
    400       if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
    401          return i;
    402       }
    403    }
    404 
    405    /* no we don't.  So look for a free slot. */
    406    for (i = 0; i < segnames_used; i++)
    407       if (!segnames[i].inUse)
    408          break;
    409 
    410    if (i == segnames_used) {
    411       /* no free slots .. advance the high-water mark. */
    412       if (segnames_used+1 < VG_N_SEGNAMES) {
    413          i = segnames_used;
    414          segnames_used++;
    415       } else {
    416          ML_(am_barf_toolow)("VG_N_SEGNAMES");
    417       }
    418    }
    419 
    420    /* copy it in */
    421    segnames[i].inUse = True;
    422    for (j = 0; j < len; j++)
    423       segnames[i].fname[j] = name[j];
    424    aspacem_assert(len < VG_MAX_SEGNAMELEN);
    425    segnames[i].fname[len] = 0;
    426    return i;
    427 }
    428 
    429 
    430 /*-----------------------------------------------------------------*/
    431 /*---                                                           ---*/
    432 /*--- Displaying the segment array.                             ---*/
    433 /*---                                                           ---*/
    434 /*-----------------------------------------------------------------*/
    435 
    436 static HChar* show_SegKind ( SegKind sk )
    437 {
    438    switch (sk) {
    439       case SkFree:  return "    ";
    440       case SkAnonC: return "anon";
    441       case SkAnonV: return "ANON";
    442       case SkFileC: return "file";
    443       case SkFileV: return "FILE";
    444       case SkShmC:  return "shm ";
    445       case SkResvn: return "RSVN";
    446       default:      return "????";
    447    }
    448 }
    449 
    450 static HChar* show_ShrinkMode ( ShrinkMode sm )
    451 {
    452    switch (sm) {
    453       case SmLower: return "SmLower";
    454       case SmUpper: return "SmUpper";
    455       case SmFixed: return "SmFixed";
    456       default: return "Sm?????";
    457    }
    458 }
    459 
    460 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
    461 {
    462    HChar* fmt;
    463    ULong len = ((ULong)end) - ((ULong)start) + 1;
    464 
    465    if (len < 10*1000*1000ULL) {
    466       fmt = "%7llu";
    467    }
    468    else if (len < 999999ULL * (1ULL<<20)) {
    469       fmt = "%6llum";
    470       len >>= 20;
    471    }
    472    else if (len < 999999ULL * (1ULL<<30)) {
    473       fmt = "%6llug";
    474       len >>= 30;
    475    }
    476    else if (len < 999999ULL * (1ULL<<40)) {
    477       fmt = "%6llut";
    478       len >>= 40;
    479    }
    480    else {
    481       fmt = "%6llue";
    482       len >>= 50;
    483    }
    484    ML_(am_sprintf)(buf, fmt, len);
    485 }
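
/* For example: a 4096-byte range is shown as "   4096", a 64MB one as
   "    64m", and a 3GB one as "  3072m" (the "g"/"t"/"e" suffixes only
   appear for very much larger ranges). */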
    486 
    487 
    488 /* Show full details of an NSegment */
    489 
    490 static void __attribute__ ((unused))
    491             show_nsegment_full ( Int logLevel, Int segNo, NSegment* seg )
    492 {
    493    HChar len_buf[20];
    494    HChar* name = "(none)";
    495 
    496    if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
    497                        && segnames[seg->fnIdx].inUse
    498                        && segnames[seg->fnIdx].fname[0] != 0)
    499       name = segnames[seg->fnIdx].fname;
    500 
    501    show_len_concisely(len_buf, seg->start, seg->end);
    502 
    503    VG_(debugLog)(
    504       logLevel, "aspacem",
    505       "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
    506       "d=0x%03llx i=%-7lld o=%-7lld (%d) m=%d %s\n",
    507       segNo, show_SegKind(seg->kind),
    508       (ULong)seg->start, (ULong)seg->end, len_buf,
    509       seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    510       seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    511       seg->isCH ? 'H' : '-',
    512       show_ShrinkMode(seg->smode),
    513       seg->dev, seg->ino, seg->offset, seg->fnIdx,
    514       (Int)seg->mark, name
    515    );
    516 }
    517 
    518 
    519 /* Show an NSegment in a user-friendly-ish way. */
    520 
    521 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
    522 {
    523    HChar len_buf[20];
    524    show_len_concisely(len_buf, seg->start, seg->end);
    525 
    526    switch (seg->kind) {
    527 
    528       case SkFree:
    529          VG_(debugLog)(
    530             logLevel, "aspacem",
    531             "%3d: %s %010llx-%010llx %s\n",
    532             segNo, show_SegKind(seg->kind),
    533             (ULong)seg->start, (ULong)seg->end, len_buf
    534          );
    535          break;
    536 
    537       case SkAnonC: case SkAnonV: case SkShmC:
    538          VG_(debugLog)(
    539             logLevel, "aspacem",
    540             "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
    541             segNo, show_SegKind(seg->kind),
    542             (ULong)seg->start, (ULong)seg->end, len_buf,
    543             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    544             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    545             seg->isCH ? 'H' : '-'
    546          );
    547          break;
    548 
    549       case SkFileC: case SkFileV:
    550          VG_(debugLog)(
    551             logLevel, "aspacem",
    552             "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
    553             "i=%-7lld o=%-7lld (%d)\n",
    554             segNo, show_SegKind(seg->kind),
    555             (ULong)seg->start, (ULong)seg->end, len_buf,
    556             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    557             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    558             seg->isCH ? 'H' : '-',
    559             seg->dev, seg->ino, seg->offset, seg->fnIdx
    560          );
    561          break;
    562 
    563       case SkResvn:
    564          VG_(debugLog)(
    565             logLevel, "aspacem",
    566             "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
    567             segNo, show_SegKind(seg->kind),
    568             (ULong)seg->start, (ULong)seg->end, len_buf,
    569             seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
    570             seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
    571             seg->isCH ? 'H' : '-',
    572             show_ShrinkMode(seg->smode)
    573          );
    574          break;
    575 
    576       default:
    577          VG_(debugLog)(
    578             logLevel, "aspacem",
    579             "%3d: ???? UNKNOWN SEGMENT KIND\n",
    580             segNo
    581          );
    582          break;
    583    }
    584 }
    585 
    586 /* Print out the segment array (debugging only!). */
    587 void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
    588 {
    589    Int i;
    590    VG_(debugLog)(logLevel, "aspacem",
    591                  "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
    592                  who, nsegments_used, segnames_used);
    593    for (i = 0; i < segnames_used; i++) {
    594       if (!segnames[i].inUse)
    595          continue;
    596       VG_(debugLog)(logLevel, "aspacem",
    597                     "(%2d) %s\n", i, segnames[i].fname);
    598    }
    599    for (i = 0; i < nsegments_used; i++)
    600      show_nsegment( logLevel, i, &nsegments[i] );
    601    VG_(debugLog)(logLevel, "aspacem",
    602                  ">>>\n");
    603 }
    604 
    605 
    606 /* Get the filename corresponding to this segment, if known and if it
    607    has one.  The returned name's storage cannot be assumed to be
    608    persistent, so the caller should immediately copy the name
    609    elsewhere. */
    610 HChar* VG_(am_get_filename)( NSegment const * seg )
    611 {
    612    Int i;
    613    aspacem_assert(seg);
    614    i = seg->fnIdx;
    615    if (i < 0 || i >= segnames_used || !segnames[i].inUse)
    616       return NULL;
    617    else
    618       return &segnames[i].fname[0];
    619 }
    620 
    621 /* Collect up the start addresses of all non-free, non-resvn segments.
    622    The interface is a bit strange in order to avoid potential
    623    segment-creation races caused by dynamic allocation of the result
    624    buffer *starts.
    625 
    626    The function first computes how many entries in the result
    627    buffer *starts will be needed.  If this number <= nStarts,
    628    they are placed in starts[0..], and the number is returned.
    629    If nStarts is not large enough, nothing is written to
    630    starts[0..], and the negation of the size is returned.
    631 
    632    Correct use of this function may mean calling it multiple times in
    633    order to establish a suitably-sized buffer. */
    634 
    635 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
    636 {
    637    Int i, j, nSegs;
    638 
    639    /* don't pass dumbass arguments */
    640    aspacem_assert(nStarts >= 0);
    641 
    642    nSegs = 0;
    643    for (i = 0; i < nsegments_used; i++) {
    644       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
    645          continue;
    646       nSegs++;
    647    }
    648 
    649    if (nSegs > nStarts) {
    650       /* The buffer isn't big enough.  Tell the caller how big it needs
    651          to be. */
    652       return -nSegs;
    653    }
    654 
    655    /* There's enough space.  So write into the result buffer. */
    656    aspacem_assert(nSegs <= nStarts);
    657 
    658    j = 0;
    659    for (i = 0; i < nsegments_used; i++) {
    660       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
    661          continue;
    662       starts[j] = nsegments[i].start;
    663       j++;
    664    }
    665 
    666    aspacem_assert(j == nSegs); /* this should not fail */
    667    return nSegs;
    668 }
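
/* Illustrative sketch (not compiled) of the two-phase protocol
   described in the comment above VG_(am_get_segment_starts): probe
   once to learn the required size, then call again with a buffer that
   is big enough.  The use of VG_(arena_malloc)/VG_(arena_free) with
   VG_AR_CORE is merely an assumption made for the example; real
   callers use whatever allocation scheme suits them. */
#if 0
static void example_get_segment_starts ( void )
{
   Addr  probe;
   Int   n_needed, n_got;
   Addr* starts;

   /* First call, with a deliberately tiny buffer; a negative result
      is the negated number of entries required. */
   n_needed = VG_(am_get_segment_starts)( &probe, 1 );
   if (n_needed < 0) n_needed = -n_needed;

   starts = VG_(arena_malloc)( VG_AR_CORE, "example.segstarts",
                               n_needed * sizeof(Addr) );
   n_got = VG_(am_get_segment_starts)( starts, n_needed );
   if (n_got < 0) {
      /* new segments appeared between the two calls; a fully robust
         caller would enlarge the buffer and retry */
   }

   /* ... use starts[0 .. n_got-1] ... */

   VG_(arena_free)( VG_AR_CORE, starts );
}
#endif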
    669 
    670 
    671 /*-----------------------------------------------------------------*/
    672 /*---                                                           ---*/
    673 /*--- Sanity checking and preening of the segment array.        ---*/
    674 /*---                                                           ---*/
    675 /*-----------------------------------------------------------------*/
    676 
    677 /* Check representational invariants for NSegments. */
    678 
    679 static Bool sane_NSegment ( NSegment* s )
    680 {
    681    if (s == NULL) return False;
    682 
    683    /* No zero sized segments and no wraparounds. */
    684    if (s->start >= s->end) return False;
    685 
    686    /* .mark is used for admin purposes only. */
    687    if (s->mark) return False;
    688 
    689    /* require page alignment */
    690    if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
    691    if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
    692 
    693    switch (s->kind) {
    694 
    695       case SkFree:
    696          return
    697             s->smode == SmFixed
    698             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    699             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
    700             && !s->isCH;
    701 
    702       case SkAnonC: case SkAnonV: case SkShmC:
    703          return
    704             s->smode == SmFixed
    705             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    706             && (s->kind==SkAnonC ? True : !s->isCH);
    707 
    708       case SkFileC: case SkFileV:
    709          return
    710             s->smode == SmFixed
    711             && (s->fnIdx == -1 ||
    712                 (s->fnIdx >= 0 && s->fnIdx < segnames_used
    713                                && segnames[s->fnIdx].inUse))
    714             && !s->isCH;
    715 
    716       case SkResvn:
    717          return
    718             s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
    719             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
    720             && !s->isCH;
    721 
    722       default:
    723          return False;
    724    }
    725 }
    726 
    727 
    728 /* Try merging s2 into s1, if possible.  If successful, s1 is
    729    modified, and True is returned.  Otherwise s1 is unchanged and
    730    False is returned. */
    731 
    732 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
    733 {
    734    if (s1->kind != s2->kind)
    735       return False;
    736 
    737    if (s1->end+1 != s2->start)
    738       return False;
    739 
    740    /* reject cases which would cause wraparound */
    741    if (s1->start > s2->end)
    742       return False;
    743 
    744    switch (s1->kind) {
    745 
    746       case SkFree:
    747          s1->end = s2->end;
    748          return True;
    749 
    750       case SkAnonC: case SkAnonV:
    751          if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
    752              && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
    753             s1->end = s2->end;
    754             s1->hasT |= s2->hasT;
    755             return True;
    756          }
    757          break;
    758 
    759       case SkFileC: case SkFileV:
    760          if (s1->hasR == s2->hasR
    761              && s1->hasW == s2->hasW && s1->hasX == s2->hasX
    762              && s1->dev == s2->dev && s1->ino == s2->ino
    763              && s2->offset == s1->offset
    764                               + ((ULong)s2->start) - ((ULong)s1->start) ) {
    765             s1->end = s2->end;
    766             s1->hasT |= s2->hasT;
    767             return True;
    768          }
    769          break;
    770 
    771       case SkShmC:
    772          return False;
    773 
    774       case SkResvn:
    775          if (s1->smode == SmFixed && s2->smode == SmFixed) {
    776             s1->end = s2->end;
    777             return True;
    778          }
     779          break;
    780       default:
    781          break;
    782 
    783    }
    784 
    785    return False;
    786 }
    787 
    788 
     789 /* Sanity-check and canonicalise the segment array (merge mergeable
    790    segments).  Returns True if any segments were merged. */
    791 
    792 static Bool preen_nsegments ( void )
    793 {
    794    Int i, j, r, w, nsegments_used_old = nsegments_used;
    795 
    796    /* Pass 1: check the segment array covers the entire address space
    797       exactly once, and also that each segment is sane. */
    798    aspacem_assert(nsegments_used > 0);
    799    aspacem_assert(nsegments[0].start == Addr_MIN);
    800    aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
    801 
    802    aspacem_assert(sane_NSegment(&nsegments[0]));
    803    for (i = 1; i < nsegments_used; i++) {
    804       aspacem_assert(sane_NSegment(&nsegments[i]));
    805       aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
    806    }
    807 
    808    /* Pass 2: merge as much as possible, using
     809       maybe_merge_nsegments. */
    810    w = 0;
    811    for (r = 1; r < nsegments_used; r++) {
    812       if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
    813          /* nothing */
    814       } else {
    815          w++;
    816          if (w != r)
    817             nsegments[w] = nsegments[r];
    818       }
    819    }
    820    w++;
    821    aspacem_assert(w > 0 && w <= nsegments_used);
    822    nsegments_used = w;
    823 
    824    /* Pass 3: free up unused string table slots */
    825    /* clear mark bits */
    826    for (i = 0; i < segnames_used; i++)
    827       segnames[i].mark = False;
    828    /* mark */
    829    for (i = 0; i < nsegments_used; i++) {
    830      j = nsegments[i].fnIdx;
    831       aspacem_assert(j >= -1 && j < segnames_used);
    832       if (j >= 0) {
    833          aspacem_assert(segnames[j].inUse);
    834          segnames[j].mark = True;
    835       }
    836    }
    837    /* release */
    838    for (i = 0; i < segnames_used; i++) {
    839       if (segnames[i].mark == False) {
    840          segnames[i].inUse = False;
    841          segnames[i].fname[0] = 0;
    842       }
    843    }
    844 
    845    return nsegments_used != nsegments_used_old;
    846 }
    847 
    848 
    849 /* Check the segment array corresponds with the kernel's view of
    850    memory layout.  sync_check_ok returns True if no anomalies were
    851    found, else False.  In the latter case the mismatching segments are
    852    displayed.
    853 
    854    The general idea is: we get the kernel to show us all its segments
    855    and also the gaps in between.  For each such interval, try and find
    856    a sequence of appropriate intervals in our segment array which
    857    cover or more than cover the kernel's interval, and which all have
    858    suitable kinds/permissions etc.
    859 
    860    Although any specific kernel interval is not matched exactly to a
    861    valgrind interval or sequence thereof, eventually any disagreement
    862    on mapping boundaries will be detected.  This is because, if for
    863    example valgrind's intervals cover a greater range than the current
    864    kernel interval, it must be the case that a neighbouring free-space
    865    interval belonging to valgrind cannot cover the neighbouring
    866    free-space interval belonging to the kernel.  So the disagreement
    867    is detected.
    868 
    869    In other words, we examine each kernel interval in turn, and check
    870    we do not disagree over the range of that interval.  Because all of
    871    the address space is examined, any disagreements must eventually be
    872    detected.
    873 */
    874 
    875 static Bool sync_check_ok = False;
    876 
    877 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
    878                                           ULong dev, ULong ino, Off64T offset,
    879                                           const UChar* filename )
    880 {
    881    Int  iLo, iHi, i;
    882    Bool sloppyXcheck;
    883 
    884    /* If a problem has already been detected, don't continue comparing
    885       segments, so as to avoid flooding the output with error
    886       messages. */
    887 #if !defined(VGO_darwin)
    888    /* GrP fixme not */
    889    if (!sync_check_ok)
    890       return;
    891 #endif
    892    if (len == 0)
    893       return;
    894 
    895    /* The kernel should not give us wraparounds. */
    896    aspacem_assert(addr <= addr + len - 1);
    897 
    898    iLo = find_nsegment_idx( addr );
    899    iHi = find_nsegment_idx( addr + len - 1 );
    900 
    901    /* These 5 should be guaranteed by find_nsegment_idx. */
    902    aspacem_assert(0 <= iLo && iLo < nsegments_used);
    903    aspacem_assert(0 <= iHi && iHi < nsegments_used);
    904    aspacem_assert(iLo <= iHi);
    905    aspacem_assert(nsegments[iLo].start <= addr );
    906    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
    907 
     908    /* x86 doesn't differentiate 'x' and 'r' (at least, on all but the
     909       most recent NX-bit enabled CPUs), and so recent kernels attempt
    910       to provide execute protection by placing all executable mappings
    911       low down in the address space and then reducing the size of the
    912       code segment to prevent code at higher addresses being executed.
    913 
    914       These kernels report which mappings are really executable in
    915       the /proc/self/maps output rather than mirroring what was asked
    916       for when each mapping was created. In order to cope with this we
    917       have a sloppyXcheck mode which we enable on x86 and s390 - in this
    918       mode we allow the kernel to report execute permission when we weren't
    919       expecting it but not vice versa. */
    920 #  if defined(VGA_x86) || defined (VGA_s390x)
    921    sloppyXcheck = True;
    922 #  else
    923    sloppyXcheck = False;
    924 #  endif
    925 
    926    /* NSegments iLo .. iHi inclusive should agree with the presented
    927       data. */
    928    for (i = iLo; i <= iHi; i++) {
    929 
    930       Bool same, cmp_offsets, cmp_devino;
    931       UInt seg_prot;
    932 
    933       /* compare the kernel's offering against ours. */
    934       same = nsegments[i].kind == SkAnonC
    935              || nsegments[i].kind == SkAnonV
    936              || nsegments[i].kind == SkFileC
    937              || nsegments[i].kind == SkFileV
    938              || nsegments[i].kind == SkShmC;
    939 
    940       seg_prot = 0;
    941       if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
    942       if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
    943       if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
    944 
    945       cmp_offsets
    946          = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
    947 
    948       cmp_devino
    949          = nsegments[i].dev != 0 || nsegments[i].ino != 0;
    950 
    951       /* Consider other reasons to not compare dev/inode */
    952 #if defined(VGO_linux)
    953       /* bproc does some godawful hack on /dev/zero at process
    954          migration, which changes the name of it, and its dev & ino */
    955       if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
    956          cmp_devino = False;
    957 
    958       /* hack apparently needed on MontaVista Linux */
    959       if (filename && VG_(strstr)(filename, "/.lib-ro/"))
    960          cmp_devino = False;
    961 #endif
    962 
    963 #if defined(VGO_darwin)
    964       // GrP fixme kernel info doesn't have dev/inode
    965       cmp_devino = False;
    966 
    967       // GrP fixme V and kernel don't agree on offsets
    968       cmp_offsets = False;
    969 #endif
    970 
     971       /* If we are doing sloppy execute permission checks then we
     972          allow a segment to have X permission when we weren't expecting
     973          it (but not vice versa).  So if the kernel reported execute
     974          permission, pretend that this segment has it regardless of
     975          what we were expecting. */
    976       if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
    977          seg_prot |= VKI_PROT_EXEC;
    978       }
    979 
    980       same = same
    981              && seg_prot == prot
    982              && (cmp_devino
    983                    ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
    984                    : True)
    985              && (cmp_offsets
    986                    ? nsegments[i].start-nsegments[i].offset == addr-offset
    987                    : True);
    988       if (!same) {
    989          Addr start = addr;
    990          Addr end = start + len - 1;
    991          HChar len_buf[20];
    992          show_len_concisely(len_buf, start, end);
    993 
    994          sync_check_ok = False;
    995 
    996          VG_(debugLog)(
    997             0,"aspacem",
    998               "segment mismatch: V's seg 1st, kernel's 2nd:\n");
    999          show_nsegment_full( 0, i, &nsegments[i] );
   1000          VG_(debugLog)(0,"aspacem",
   1001             "...: .... %010llx-%010llx %s %c%c%c.. ....... "
   1002             "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
   1003             (ULong)start, (ULong)end, len_buf,
   1004             prot & VKI_PROT_READ  ? 'r' : '-',
   1005             prot & VKI_PROT_WRITE ? 'w' : '-',
   1006             prot & VKI_PROT_EXEC  ? 'x' : '-',
   1007             dev, ino, offset, filename ? (HChar*)filename : "(none)" );
   1008 
   1009          return;
   1010       }
   1011    }
   1012 
   1013    /* Looks harmless.  Keep going. */
   1014    return;
   1015 }
   1016 
   1017 static void sync_check_gap_callback ( Addr addr, SizeT len )
   1018 {
   1019    Int iLo, iHi, i;
   1020 
   1021    /* If a problem has already been detected, don't continue comparing
   1022       segments, so as to avoid flooding the output with error
   1023       messages. */
   1024 #if !defined(VGO_darwin)
   1025    /* GrP fixme not */
   1026    if (!sync_check_ok)
   1027       return;
   1028 #endif
   1029    if (len == 0)
   1030       return;
   1031 
   1032    /* The kernel should not give us wraparounds. */
   1033    aspacem_assert(addr <= addr + len - 1);
   1034 
   1035    iLo = find_nsegment_idx( addr );
   1036    iHi = find_nsegment_idx( addr + len - 1 );
   1037 
   1038    /* These 5 should be guaranteed by find_nsegment_idx. */
   1039    aspacem_assert(0 <= iLo && iLo < nsegments_used);
   1040    aspacem_assert(0 <= iHi && iHi < nsegments_used);
   1041    aspacem_assert(iLo <= iHi);
   1042    aspacem_assert(nsegments[iLo].start <= addr );
   1043    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
   1044 
   1045    /* NSegments iLo .. iHi inclusive should agree with the presented
   1046       data. */
   1047    for (i = iLo; i <= iHi; i++) {
   1048 
   1049       Bool same;
   1050 
   1051       /* compare the kernel's offering against ours. */
   1052       same = nsegments[i].kind == SkFree
   1053              || nsegments[i].kind == SkResvn;
   1054 
   1055       if (!same) {
   1056          Addr start = addr;
   1057          Addr end = start + len - 1;
   1058          HChar len_buf[20];
   1059          show_len_concisely(len_buf, start, end);
   1060 
   1061          sync_check_ok = False;
   1062 
   1063          VG_(debugLog)(
   1064             0,"aspacem",
   1065               "segment mismatch: V's gap 1st, kernel's 2nd:\n");
   1066          show_nsegment_full( 0, i, &nsegments[i] );
   1067          VG_(debugLog)(0,"aspacem",
   1068             "   : .... %010llx-%010llx %s",
   1069             (ULong)start, (ULong)end, len_buf);
   1070          return;
   1071       }
   1072    }
   1073 
   1074    /* Looks harmless.  Keep going. */
   1075    return;
   1076 }
   1077 
   1078 
   1079 /* Sanity check: check that Valgrind and the kernel agree on the
   1080    address space layout.  Prints offending segments and call point if
   1081    a discrepancy is detected, but does not abort the system.  Returned
   1082    Bool is False if a discrepancy was found. */
   1083 
   1084 Bool VG_(am_do_sync_check) ( const HChar* fn,
   1085                              const HChar* file, Int line )
   1086 {
   1087    sync_check_ok = True;
   1088    if (0)
   1089       VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
   1090    parse_procselfmaps( sync_check_mapping_callback,
   1091                        sync_check_gap_callback );
   1092    if (!sync_check_ok) {
   1093       VG_(debugLog)(0,"aspacem",
   1094                       "sync check at %s:%d (%s): FAILED\n",
   1095                       file, line, fn);
   1096       VG_(debugLog)(0,"aspacem", "\n");
   1097 
   1098 #     if 0
   1099       {
   1100          HChar buf[100];
   1101          VG_(am_show_nsegments)(0,"post syncheck failure");
   1102          VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
   1103          VG_(system)(buf);
   1104       }
   1105 #     endif
   1106 
   1107    }
   1108    return sync_check_ok;
   1109 }
   1110 
   1111 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
   1112 void ML_(am_do_sanity_check)( void )
   1113 {
   1114    AM_SANITY_CHECK;
   1115 }
   1116 
   1117 
   1118 /*-----------------------------------------------------------------*/
   1119 /*---                                                           ---*/
   1120 /*--- Low level access / modification of the segment array.     ---*/
   1121 /*---                                                           ---*/
   1122 /*-----------------------------------------------------------------*/
   1123 
   1124 /* Binary search the interval array for a given address.  Since the
   1125    array covers the entire address space the search cannot fail.  The
   1126    _WRK function does the real work.  Its caller (just below) caches
    1127    the results thereof, to save time.  With N_CACHE of 63, a hit rate
    1128    exceeding 90% was measured when running OpenOffice; N_CACHE is now 131.
   1129 
    1130    Re ">> 12", it doesn't matter that some targets have a page size
    1131    other than 4096 (2^12).  Really "(a >> 12) % N_CACHE" is merely
   1132    a hash function, and the actual cache entry is always validated
   1133    correctly against the selected cache entry before use.
   1134 */
   1135 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
   1136 __attribute__((noinline))
   1137 static Int find_nsegment_idx_WRK ( Addr a )
   1138 {
   1139    Addr a_mid_lo, a_mid_hi;
   1140    Int  mid,
   1141         lo = 0,
   1142         hi = nsegments_used-1;
   1143    while (True) {
   1144       /* current unsearched space is from lo to hi, inclusive. */
   1145       if (lo > hi) {
   1146          /* Not found.  This can't happen. */
   1147          ML_(am_barf)("find_nsegment_idx: not found");
   1148       }
   1149       mid      = (lo + hi) / 2;
   1150       a_mid_lo = nsegments[mid].start;
   1151       a_mid_hi = nsegments[mid].end;
   1152 
   1153       if (a < a_mid_lo) { hi = mid-1; continue; }
   1154       if (a > a_mid_hi) { lo = mid+1; continue; }
   1155       aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
   1156       aspacem_assert(0 <= mid && mid < nsegments_used);
   1157       return mid;
   1158    }
   1159 }
   1160 
   1161 inline static Int find_nsegment_idx ( Addr a )
   1162 {
   1163 #  define N_CACHE 131 /*prime*/
   1164    static Addr cache_pageno[N_CACHE];
   1165    static Int  cache_segidx[N_CACHE];
   1166    static Bool cache_inited = False;
   1167 
   1168    static UWord n_q = 0;
   1169    static UWord n_m = 0;
   1170 
   1171    UWord ix;
   1172 
   1173    if (LIKELY(cache_inited)) {
   1174       /* do nothing */
   1175    } else {
   1176       for (ix = 0; ix < N_CACHE; ix++) {
   1177          cache_pageno[ix] = 0;
   1178          cache_segidx[ix] = -1;
   1179       }
   1180       cache_inited = True;
   1181    }
   1182 
   1183    ix = (a >> 12) % N_CACHE;
   1184 
   1185    n_q++;
   1186    if (0 && 0 == (n_q & 0xFFFF))
   1187       VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
   1188 
   1189    if ((a >> 12) == cache_pageno[ix]
   1190        && cache_segidx[ix] >= 0
   1191        && cache_segidx[ix] < nsegments_used
   1192        && nsegments[cache_segidx[ix]].start <= a
   1193        && a <= nsegments[cache_segidx[ix]].end) {
   1194       /* hit */
   1195       /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
   1196       return cache_segidx[ix];
   1197    }
   1198    /* miss */
   1199    n_m++;
   1200    cache_segidx[ix] = find_nsegment_idx_WRK(a);
   1201    cache_pageno[ix] = a >> 12;
   1202    return cache_segidx[ix];
   1203 #  undef N_CACHE
   1204 }
   1205 
   1206 
   1207 
    1208 /* Finds the segment containing 'a'.  Only returns non-free segments
    1209    (file/anon/shm/resvn).  This returns a 'NSegment const *' - a pointer to
   1210    readonly data. */
   1211 NSegment const * VG_(am_find_nsegment) ( Addr a )
   1212 {
   1213    Int i = find_nsegment_idx(a);
   1214    aspacem_assert(i >= 0 && i < nsegments_used);
   1215    aspacem_assert(nsegments[i].start <= a);
   1216    aspacem_assert(a <= nsegments[i].end);
   1217    if (nsegments[i].kind == SkFree)
   1218       return NULL;
   1219    else
   1220       return &nsegments[i];
   1221 }
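
/* Illustrative sketch (not compiled) of a typical query against the
   segment array, combining VG_(am_find_nsegment) with
   VG_(am_get_filename) above.  The helper name and its use of
   VG_(strncpy) are made up for the example; it stands in for a caller
   elsewhere in the system. */
#if 0
static Bool example_is_client_file_mapping ( Addr a,
                                             /*OUT*/HChar* buf, Int nBuf )
{
   NSegment const* seg = VG_(am_find_nsegment)( a );
   HChar* name;
   if (seg == NULL || seg->kind != SkFileC)
      return False;   /* 'a' is in free space, or not a client file map */
   name = VG_(am_get_filename)( seg );
   if (name && nBuf > 0) {
      /* the returned storage is not persistent, so copy it out now */
      VG_(strncpy)( buf, name, nBuf-1 );
      buf[nBuf-1] = 0;
   }
   return True;
}
#endif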
   1222 
   1223 
   1224 /* Given a pointer to a seg, tries to figure out which one it is in
   1225    nsegments[..].  Very paranoid. */
   1226 static Int segAddr_to_index ( NSegment* seg )
   1227 {
   1228    Int i;
   1229    if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
   1230       return -1;
   1231    i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
   1232    if (i < 0 || i >= nsegments_used)
   1233       return -1;
   1234    if (seg == &nsegments[i])
   1235       return i;
   1236    return -1;
   1237 }
   1238 
   1239 
    1240 /* Find the next segment along from 'here', if it is a
    1241    file/anon/shm/resvn segment. */
   1242 NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
   1243 {
   1244    Int i = segAddr_to_index(here);
   1245    if (i < 0 || i >= nsegments_used)
   1246       return NULL;
   1247    if (fwds) {
   1248       i++;
   1249       if (i >= nsegments_used)
   1250          return NULL;
   1251    } else {
   1252       i--;
   1253       if (i < 0)
   1254          return NULL;
   1255    }
   1256    switch (nsegments[i].kind) {
   1257       case SkFileC: case SkFileV: case SkShmC:
   1258       case SkAnonC: case SkAnonV: case SkResvn:
   1259          return &nsegments[i];
   1260       default:
   1261          break;
   1262    }
   1263    return NULL;
   1264 }
   1265 
   1266 
   1267 /* Trivial fn: return the total amount of space in anonymous mappings,
   1268    both for V and the client.  Is used for printing stats in
   1269    out-of-memory messages. */
   1270 ULong VG_(am_get_anonsize_total)( void )
   1271 {
   1272    Int   i;
   1273    ULong total = 0;
   1274    for (i = 0; i < nsegments_used; i++) {
   1275       if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
   1276          total += (ULong)nsegments[i].end
   1277                   - (ULong)nsegments[i].start + 1ULL;
   1278       }
   1279    }
   1280    return total;
   1281 }
   1282 
   1283 
   1284 /* Test if a piece of memory is addressable by the client with at
   1285    least the "prot" protection permissions by examining the underlying
   1286    segments.  If freeOk is True then SkFree areas are also allowed.
   1287 */
   1288 static
   1289 Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
   1290 {
   1291    Int  i, iLo, iHi;
   1292    Bool needR, needW, needX;
   1293 
   1294    if (len == 0)
   1295       return True; /* somewhat dubious case */
   1296    if (start + len < start)
   1297       return False; /* reject wraparounds */
   1298 
   1299    needR = toBool(prot & VKI_PROT_READ);
   1300    needW = toBool(prot & VKI_PROT_WRITE);
   1301    needX = toBool(prot & VKI_PROT_EXEC);
   1302 
   1303    iLo = find_nsegment_idx(start);
   1304    aspacem_assert(start >= nsegments[iLo].start);
   1305 
   1306    if (start+len-1 <= nsegments[iLo].end) {
   1307       /* This is a speedup hack which avoids calling find_nsegment_idx
   1308          a second time when possible.  It is always correct to just
   1309          use the "else" clause below, but is_valid_for_client is
   1310          called a lot by the leak checker, so avoiding pointless calls
   1311          to find_nsegment_idx, which can be expensive, is helpful. */
   1312       iHi = iLo;
   1313    } else {
   1314       iHi = find_nsegment_idx(start + len - 1);
   1315    }
   1316 
   1317    for (i = iLo; i <= iHi; i++) {
   1318       if ( (nsegments[i].kind == SkFileC
   1319             || nsegments[i].kind == SkAnonC
   1320             || nsegments[i].kind == SkShmC
   1321             || (nsegments[i].kind == SkFree  && freeOk)
   1322             || (nsegments[i].kind == SkResvn && freeOk))
   1323            && (needR ? nsegments[i].hasR : True)
   1324            && (needW ? nsegments[i].hasW : True)
   1325            && (needX ? nsegments[i].hasX : True) ) {
   1326          /* ok */
   1327       } else {
   1328          return False;
   1329       }
   1330    }
   1331    return True;
   1332 }
   1333 
   1334 /* Test if a piece of memory is addressable by the client with at
   1335    least the "prot" protection permissions by examining the underlying
   1336    segments. */
   1337 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
   1338                                   UInt prot )
   1339 {
   1340    return is_valid_for_client( start, len, prot, False/*free not OK*/ );
   1341 }
   1342 
   1343 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
   1344    be consider part of the client's addressable space.  It also
    1345    be considered part of the client's addressable space.  It also
   1346    point of view they don't exist. */
   1347 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
   1348    ( Addr start, SizeT len, UInt prot )
   1349 {
   1350    return is_valid_for_client( start, len, prot, True/*free is OK*/ );
   1351 }
   1352 
   1353 
   1354 /* Test if a piece of memory is addressable by valgrind with at least
   1355    PROT_NONE protection permissions by examining the underlying
   1356    segments. */
   1357 static Bool is_valid_for_valgrind( Addr start, SizeT len )
   1358 {
   1359    Int  i, iLo, iHi;
   1360 
   1361    if (len == 0)
   1362       return True; /* somewhat dubious case */
   1363    if (start + len < start)
   1364       return False; /* reject wraparounds */
   1365 
   1366    iLo = find_nsegment_idx(start);
   1367    iHi = find_nsegment_idx(start + len - 1);
   1368    for (i = iLo; i <= iHi; i++) {
   1369       if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
   1370          /* ok */
   1371       } else {
   1372          return False;
   1373       }
   1374    }
   1375    return True;
   1376 }
   1377 
   1378 
   1379 /* Returns True if any part of the address range is marked as having
   1380    translations made from it.  This is used to determine when to
   1381    discard code, so if in doubt return True. */
   1382 
   1383 static Bool any_Ts_in_range ( Addr start, SizeT len )
   1384 {
   1385    Int iLo, iHi, i;
   1386    aspacem_assert(len > 0);
   1387    aspacem_assert(start + len > start);
   1388    iLo = find_nsegment_idx(start);
   1389    iHi = find_nsegment_idx(start + len - 1);
   1390    for (i = iLo; i <= iHi; i++) {
   1391       if (nsegments[i].hasT)
   1392          return True;
   1393    }
   1394    return False;
   1395 }
   1396 
   1397 
   1398 /*-----------------------------------------------------------------*/
   1399 /*---                                                           ---*/
   1400 /*--- Modifying the segment array, and constructing segments.   ---*/
   1401 /*---                                                           ---*/
   1402 /*-----------------------------------------------------------------*/
   1403 
   1404 /* Split the segment containing 'a' into two, so that 'a' is
   1405    guaranteed to be the start of a new segment.  If 'a' is already the
   1406    start of a segment, do nothing. */
   1407 
   1408 static void split_nsegment_at ( Addr a )
   1409 {
   1410    Int i, j;
   1411 
   1412    aspacem_assert(a > 0);
   1413    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   1414 
   1415    i = find_nsegment_idx(a);
   1416    aspacem_assert(i >= 0 && i < nsegments_used);
   1417 
   1418    if (nsegments[i].start == a)
   1419       /* 'a' is already the start point of a segment, so nothing to be
   1420          done. */
   1421       return;
   1422 
   1423    /* else we have to slide the segments upwards to make a hole */
   1424    if (nsegments_used >= VG_N_SEGMENTS)
   1425       ML_(am_barf_toolow)("VG_N_SEGMENTS");
   1426    for (j = nsegments_used-1; j > i; j--)
   1427       nsegments[j+1] = nsegments[j];
   1428    nsegments_used++;
   1429 
   1430    nsegments[i+1]       = nsegments[i];
   1431    nsegments[i+1].start = a;
   1432    nsegments[i].end     = a-1;
   1433 
   1434    if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
   1435       nsegments[i+1].offset
   1436          += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
   1437 
   1438    aspacem_assert(sane_NSegment(&nsegments[i]));
   1439    aspacem_assert(sane_NSegment(&nsegments[i+1]));
   1440 }
   1441 
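        /* Illustrative sketch, not part of the build, of the invariant
           established above: after split_nsegment_at(a) returns, 'a' is the
           start address of some segment, so a lookup of 'a' yields a segment
           whose .start is exactly 'a'.  The helper name is illustrative. */
        #if 0
        static void example_split_invariant ( Addr a )
        {
           aspacem_assert(a > 0 && VG_IS_PAGE_ALIGNED(a));
           split_nsegment_at(a);
           aspacem_assert(nsegments[ find_nsegment_idx(a) ].start == a);
        }
        #endif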
   1442 
   1443 /* Do the minimum amount of segment splitting necessary to ensure that
   1444    sLo is the first address denoted by some segment and sHi is the
   1445    highest address denoted by some other segment.  Returns the indices
   1446    of the lowest and highest segments in the range. */
   1447 
   1448 static
   1449 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
   1450                                  /*OUT*/Int* iLo,
   1451                                  /*OUT*/Int* iHi )
   1452 {
   1453    aspacem_assert(sLo < sHi);
   1454    aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
   1455    aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
   1456 
   1457    if (sLo > 0)
   1458       split_nsegment_at(sLo);
   1459    if (sHi < sHi+1)   /* iow, sHi != Addr_MAX (sHi+1 would wrap) */
   1460       split_nsegment_at(sHi+1);
   1461 
   1462    *iLo = find_nsegment_idx(sLo);
   1463    *iHi = find_nsegment_idx(sHi);
   1464    aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
   1465    aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
   1466    aspacem_assert(*iLo <= *iHi);
   1467    aspacem_assert(nsegments[*iLo].start == sLo);
   1468    aspacem_assert(nsegments[*iHi].end == sHi);
   1469    /* Not that I'm overly paranoid or anything, definitely not :-) */
   1470 }
   1471 
   1472 
   1473 /* Add SEG to the collection, deleting/truncating any it overlaps.
   1474    This deals with all the tricky cases of splitting up segments as
   1475    needed. */
   1476 
   1477 static void add_segment ( NSegment* seg )
   1478 {
   1479    Int  i, iLo, iHi, delta;
   1480    Bool segment_is_sane;
   1481 
   1482    Addr sStart = seg->start;
   1483    Addr sEnd   = seg->end;
   1484 
   1485    aspacem_assert(sStart <= sEnd);
   1486    aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
   1487    aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
   1488 
   1489    segment_is_sane = sane_NSegment(seg);
   1490    if (!segment_is_sane) show_nsegment_full(0,-1,seg);
   1491    aspacem_assert(segment_is_sane);
   1492 
   1493    split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
   1494 
   1495    /* Now iLo .. iHi inclusive is the range of segment indices which
   1496       seg will replace.  If we're replacing more than one segment,
   1497       slide those above the range down to fill the hole. */
   1498    delta = iHi - iLo;
   1499    aspacem_assert(delta >= 0);
   1500    if (delta > 0) {
   1501       for (i = iLo; i < nsegments_used-delta; i++)
   1502          nsegments[i] = nsegments[i+delta];
   1503       nsegments_used -= delta;
   1504    }
   1505 
   1506    nsegments[iLo] = *seg;
   1507 
   1508    (void)preen_nsegments();
   1509    if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
   1510 }
   1511 
   1512 
   1513 /* Clear out an NSegment record. */
   1514 
   1515 static void init_nsegment ( /*OUT*/NSegment* seg )
   1516 {
   1517    seg->kind     = SkFree;
   1518    seg->start    = 0;
   1519    seg->end      = 0;
   1520    seg->smode    = SmFixed;
   1521    seg->dev      = 0;
   1522    seg->ino      = 0;
   1523    seg->mode     = 0;
   1524    seg->offset   = 0;
   1525    seg->fnIdx    = -1;
   1526    seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
   1527    seg->mark = False;
   1528 }
   1529 
   1530 /* Make an NSegment which holds a reservation. */
   1531 
   1532 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
   1533 {
   1534    aspacem_assert(start < end);
   1535    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   1536    aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
   1537    init_nsegment(seg);
   1538    seg->kind  = SkResvn;
   1539    seg->start = start;
   1540    seg->end   = end;
   1541 }
   1542 
   1543 
   1544 /*-----------------------------------------------------------------*/
   1545 /*---                                                           ---*/
   1546 /*--- Startup, including reading /proc/self/maps.               ---*/
   1547 /*---                                                           ---*/
   1548 /*-----------------------------------------------------------------*/
   1549 
   1550 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
   1551                                  ULong dev, ULong ino, Off64T offset,
   1552                                  const UChar* filename )
   1553 {
   1554    NSegment seg;
   1555    init_nsegment( &seg );
   1556    seg.start  = addr;
   1557    seg.end    = addr+len-1;
   1558    seg.dev    = dev;
   1559    seg.ino    = ino;
   1560    seg.offset = offset;
   1561    seg.hasR   = toBool(prot & VKI_PROT_READ);
   1562    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   1563    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   1564    seg.hasT   = False;
   1565 
   1566    /* Don't use the presence of a filename to decide whether a segment
   1567       in the initial /proc/self/maps is an AnonV or FileV segment, as
   1568       some systems don't report the filename.  Use the device and inode
   1569       numbers instead.  Fixes bug #124528. */
   1570    seg.kind = SkAnonV;
   1571    if (dev != 0 && ino != 0)
   1572       seg.kind = SkFileV;
   1573 
   1574 #  if defined(VGO_darwin)
   1575    // GrP fixme no dev/ino on darwin
   1576    if (offset != 0)
   1577       seg.kind = SkFileV;
   1578 #  endif // defined(VGO_darwin)
   1579 
   1580 #  if defined(VGP_arm_linux)
   1581    /* The standard handling of entries read from /proc/self/maps will
   1582       cause the faked up commpage segment to have type SkAnonV, which
   1583       is a problem because it contains code we want the client to
   1584       execute, and so later m_translate will segfault the client when
   1585       it tries to go in there.  Hence change the ownership of it here
   1586       to the client (SkAnonC).  The least-worst kludge I could think
   1587       of. */
   1588    if (addr == ARM_LINUX_FAKE_COMMPAGE_START
   1589        && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
   1590        && seg.kind == SkAnonV)
   1591       seg.kind = SkAnonC;
   1592 #  endif // defined(VGP_arm_linux)
   1593 
   1594    if (filename)
   1595       seg.fnIdx = allocate_segname( filename );
   1596 
   1597    if (0) show_nsegment( 2,0, &seg );
   1598    add_segment( &seg );
   1599 }
   1600 
   1601 /* Initialise the address space manager, setting up the initial
   1602    segment list, and reading /proc/self/maps into it.  This must
   1603    be called before any other function.
   1604 
   1605    Takes the value of SP at the time V gained control.  This is
   1606    taken to be the highest usable address (more or less).  Based on
   1607    that (and general consultation of tea leaves, etc) return a
   1608    suggested end address for the client's stack. */
   1609 
   1610 Addr VG_(am_startup) ( Addr sp_at_startup )
   1611 {
   1612    NSegment seg;
   1613    Addr     suggested_clstack_top;
   1614 
   1615    aspacem_assert(sizeof(Word)   == sizeof(void*));
   1616    aspacem_assert(sizeof(Addr)   == sizeof(void*));
   1617    aspacem_assert(sizeof(SizeT)  == sizeof(void*));
   1618    aspacem_assert(sizeof(SSizeT) == sizeof(void*));
   1619 
   1620    /* Check that we can store the largest imaginable dev, ino and
   1621       offset numbers in an NSegment. */
   1622    aspacem_assert(sizeof(seg.dev)    == 8);
   1623    aspacem_assert(sizeof(seg.ino)    == 8);
   1624    aspacem_assert(sizeof(seg.offset) == 8);
   1625    aspacem_assert(sizeof(seg.mode)   == 4);
   1626 
   1627    /* Add a single interval covering the entire address space. */
   1628    init_nsegment(&seg);
   1629    seg.kind        = SkFree;
   1630    seg.start       = Addr_MIN;
   1631    seg.end         = Addr_MAX;
   1632    nsegments[0]    = seg;
   1633    nsegments_used  = 1;
   1634 
   1635 #if defined(VGO_darwin)
   1636 
   1637 # if VG_WORDSIZE == 4
   1638    aspacem_minAddr = (Addr) 0x00001000;
   1639    aspacem_maxAddr = (Addr) 0xffffffff;
   1640 
   1641    aspacem_cStart = aspacem_minAddr;
   1642    aspacem_vStart = 0xf0000000;  // 0xc0000000..0xf0000000 available
   1643 # else
   1644    aspacem_minAddr = (Addr) 0x100000000;  // 4GB page zero
   1645    aspacem_maxAddr = (Addr) 0x7fffffffffff;
   1646 
   1647    aspacem_cStart = aspacem_minAddr;
   1648    aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
   1649    // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
   1650 # endif
   1651 
   1652    suggested_clstack_top = -1; // ignored; Mach-O specifies its stack
   1653 
   1654 #else
   1655 
   1656    /* Establish address limits and block out unusable parts
   1657       accordingly. */
   1658 
   1659    VG_(debugLog)(2, "aspacem",
   1660                     "        sp_at_startup = 0x%010llx (supplied)\n",
   1661                     (ULong)sp_at_startup );
   1662 
   1663    aspacem_minAddr = (Addr) 0x04000000; // 64M
   1664 
   1665 #  if VG_WORDSIZE == 8
   1666      aspacem_maxAddr = (Addr)0x8000000000 - 1; // 512G
   1667 #    ifdef ENABLE_INNER
   1668      { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
   1669        if (aspacem_maxAddr > cse)
   1670           aspacem_maxAddr = cse;
   1671      }
   1672 #    endif
   1673 #  else
   1674      aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
   1675 #  endif
   1676 
   1677    aspacem_cStart = aspacem_minAddr; // 64M
   1678    aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
   1679 #  ifdef ENABLE_INNER
   1680    aspacem_vStart -= 0x10000000; // 256M
   1681 #  endif
   1682 
   1683    suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
   1684                                            + VKI_PAGE_SIZE;
   1685 
   1686 #endif
   1687 
   1688    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
   1689    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
   1690    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
   1691    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
   1692    aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
   1693 
   1694    VG_(debugLog)(2, "aspacem",
   1695                     "              minAddr = 0x%010llx (computed)\n",
   1696                     (ULong)aspacem_minAddr);
   1697    VG_(debugLog)(2, "aspacem",
   1698                     "              maxAddr = 0x%010llx (computed)\n",
   1699                     (ULong)aspacem_maxAddr);
   1700    VG_(debugLog)(2, "aspacem",
   1701                     "               cStart = 0x%010llx (computed)\n",
   1702                     (ULong)aspacem_cStart);
   1703    VG_(debugLog)(2, "aspacem",
   1704                     "               vStart = 0x%010llx (computed)\n",
   1705                     (ULong)aspacem_vStart);
   1706    VG_(debugLog)(2, "aspacem",
   1707                     "suggested_clstack_top = 0x%010llx (computed)\n",
   1708                     (ULong)suggested_clstack_top);
   1709 
   1710    if (aspacem_cStart > Addr_MIN) {
   1711       init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
   1712       add_segment(&seg);
   1713    }
   1714    if (aspacem_maxAddr < Addr_MAX) {
   1715       init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
   1716       add_segment(&seg);
   1717    }
   1718 
   1719    /* Create a 1-page reservation at the notional initial
   1720       client/valgrind boundary.  This isn't strictly necessary, but
   1721       because the advisor does first-fit and starts searches for
   1722       valgrind allocations at the boundary, this is kind of necessary
   1723       in order to get it to start allocating in the right place. */
   1724    init_resvn(&seg, aspacem_vStart,  aspacem_vStart + VKI_PAGE_SIZE - 1);
   1725    add_segment(&seg);
   1726 
   1727    VG_(am_show_nsegments)(2, "Initial layout");
   1728 
   1729    VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
   1730    parse_procselfmaps( read_maps_callback, NULL );
   1731    /* NB: on arm-linux, parse_procselfmaps automagically kludges up
   1732       (iow, hands to its callbacks) a description of the ARM Commpage,
   1733       since that's not listed in /proc/self/maps (kernel bug IMO).  We
   1734       have to fake up its existence in parse_procselfmaps and not
   1735       merely add it here as an extra segment, because doing the latter
   1736       causes sync checking to fail: we see we have an extra segment in
   1737       the segments array, which isn't listed in /proc/self/maps.
   1738       Hence we must make it appear that /proc/self/maps contained this
   1739       segment all along.  Sigh. */
   1740 
   1741    VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
   1742 
   1743    AM_SANITY_CHECK;
   1744    return suggested_clstack_top;
   1745 }
   1746 
   1747 
   1748 /*-----------------------------------------------------------------*/
   1749 /*---                                                           ---*/
   1750 /*--- The core query-notify mechanism.                          ---*/
   1751 /*---                                                           ---*/
   1752 /*-----------------------------------------------------------------*/
   1753 
   1754 /* Query aspacem to ask where a mapping should go. */
   1755 
   1756 Addr VG_(am_get_advisory) ( MapRequest*  req,
   1757                             Bool         forClient,
   1758                             /*OUT*/Bool* ok )
   1759 {
   1760    /* This function implements allocation policy.
   1761 
   1762       The nature of the allocation request is determined by req, which
   1763       specifies the start and length of the request and indicates
   1764       whether the start address is mandatory, a hint, or irrelevant,
   1765       and by forClient, which says whether this is for the client or
   1766       for V.
   1767 
   1768       Return values: the request can be vetoed (*ok is set to False),
   1769       in which case the caller should not attempt to proceed with
   1770       making the mapping.  Otherwise, *ok is set to True, the caller
   1771       may proceed, and the preferred address at which the mapping
   1772       should happen is returned.
   1773 
   1774       Note that this is an advisory system only: the kernel can in
   1775       fact do whatever it likes as far as placement goes, and we have
   1776       no absolute control over it.
   1777 
   1778       Allocations will never be granted in a reserved area.
   1779 
   1780       The Default Policy is:
   1781 
   1782         Search the address space for two free intervals: one of them
   1783         big enough to contain the request without regard to the
   1784         specified address (viz, as if it was a floating request) and
   1785         the other being able to contain the request at the specified
   1786         address (viz, as if it were a fixed request).  Then, depending on
   1787         the outcome of the search and the kind of request made, decide
   1788         whether the request is allowable and what address to advise.
   1789 
   1790       The Default Policy is overridden by Policy Exception #1:
   1791 
   1792         If the request is for a fixed client map, we are prepared to
   1793         grant it providing all areas inside the request are either
   1794         free, reservations, or mappings belonging to the client.  In
   1795         other words we are prepared to let the client trash its own
   1796         mappings if it wants to.
   1797 
   1798       The Default Policy is overridden by Policy Exception #2:
   1799 
   1800         If the request is for a hinted client map, we are prepared to
   1801         grant it providing all areas inside the request are either
   1802         free or reservations.  In other words we are prepared to let
   1803         the client have a hinted mapping anywhere it likes provided
   1804         it does not trash either any of its own mappings or any of
   1805         valgrind's mappings.
   1806    */
   1807    Int  i, j;
   1808    Addr holeStart, holeEnd, holeLen;
   1809    Bool fixed_not_required;
   1810 
   1811    Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
   1812 
   1813    Addr reqStart = req->rkind==MAny ? 0 : req->start;
   1814    Addr reqEnd   = reqStart + req->len - 1;
   1815    Addr reqLen   = req->len;
   1816 
   1817    /* These hold indices for segments found during search, or -1 if not
   1818       found. */
   1819    Int floatIdx = -1;
   1820    Int fixedIdx = -1;
   1821 
   1822    aspacem_assert(nsegments_used > 0);
   1823 
   1824    if (0) {
   1825       VG_(am_show_nsegments)(0,"getAdvisory");
   1826       VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
   1827                       (ULong)req->start, (ULong)req->len);
   1828    }
   1829 
   1830    /* Reject zero-length requests */
   1831    if (req->len == 0) {
   1832       *ok = False;
   1833       return 0;
   1834    }
   1835 
   1836    /* Reject wraparounds */
   1837    if ((req->rkind==MFixed || req->rkind==MHint)
   1838        && req->start + req->len < req->start) {
   1839       *ok = False;
   1840       return 0;
   1841    }
   1842 
   1843    /* ------ Implement Policy Exception #1 ------ */
   1844 
   1845    if (forClient && req->rkind == MFixed) {
   1846       Int  iLo   = find_nsegment_idx(reqStart);
   1847       Int  iHi   = find_nsegment_idx(reqEnd);
   1848       Bool allow = True;
   1849       for (i = iLo; i <= iHi; i++) {
   1850          if (nsegments[i].kind == SkFree
   1851              || nsegments[i].kind == SkFileC
   1852              || nsegments[i].kind == SkAnonC
   1853              || nsegments[i].kind == SkShmC
   1854              || nsegments[i].kind == SkResvn) {
   1855             /* ok */
   1856          } else {
   1857             allow = False;
   1858             break;
   1859          }
   1860       }
   1861       if (allow) {
   1862          /* Acceptable.  Granted. */
   1863          *ok = True;
   1864          return reqStart;
   1865       }
   1866       /* Not acceptable.  Fail. */
   1867       *ok = False;
   1868       return 0;
   1869    }
   1870 
   1871    /* ------ Implement Policy Exception #2 ------ */
   1872 
   1873    if (forClient && req->rkind == MHint) {
   1874       Int  iLo   = find_nsegment_idx(reqStart);
   1875       Int  iHi   = find_nsegment_idx(reqEnd);
   1876       Bool allow = True;
   1877       for (i = iLo; i <= iHi; i++) {
   1878          if (nsegments[i].kind == SkFree
   1879              || nsegments[i].kind == SkResvn) {
   1880             /* ok */
   1881          } else {
   1882             allow = False;
   1883             break;
   1884          }
   1885       }
   1886       if (allow) {
   1887          /* Acceptable.  Granted. */
   1888          *ok = True;
   1889          return reqStart;
   1890       }
   1891       /* Not acceptable.  Fall through to the default policy. */
   1892    }
   1893 
   1894    /* ------ Implement the Default Policy ------ */
   1895 
   1896    /* Don't waste time looking for a fixed match if not requested to. */
   1897    fixed_not_required = req->rkind == MAny;
   1898 
   1899    i = find_nsegment_idx(startPoint);
   1900 
   1901    /* Examine holes from index i back round to i-1.  Record the index
   1902       of the first fixed hole and the first floating hole which would
   1903       satisfy the request. */
   1904    for (j = 0; j < nsegments_used; j++) {
   1905 
   1906       if (nsegments[i].kind != SkFree) {
   1907          i++;
   1908          if (i >= nsegments_used) i = 0;
   1909          continue;
   1910       }
   1911 
   1912       holeStart = nsegments[i].start;
   1913       holeEnd   = nsegments[i].end;
   1914 
   1915       /* Stay sane .. */
   1916       aspacem_assert(holeStart <= holeEnd);
   1917       aspacem_assert(aspacem_minAddr <= holeStart);
   1918       aspacem_assert(holeEnd <= aspacem_maxAddr);
   1919 
   1920       /* See if it's any use to us. */
   1921       holeLen = holeEnd - holeStart + 1;
   1922 
   1923       if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
   1924          fixedIdx = i;
   1925 
   1926       if (floatIdx == -1 && holeLen >= reqLen)
   1927          floatIdx = i;
   1928 
   1929       /* Don't waste time searching once we've found what we wanted. */
   1930       if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
   1931          break;
   1932 
   1933       i++;
   1934       if (i >= nsegments_used) i = 0;
   1935    }
   1936 
   1937    aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
   1938    if (fixedIdx >= 0)
   1939       aspacem_assert(nsegments[fixedIdx].kind == SkFree);
   1940 
   1941    aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
   1942    if (floatIdx >= 0)
   1943       aspacem_assert(nsegments[floatIdx].kind == SkFree);
   1944 
   1945    AM_SANITY_CHECK;
   1946 
   1947    /* Now see if we found anything which can satisfy the request. */
   1948    switch (req->rkind) {
   1949       case MFixed:
   1950          if (fixedIdx >= 0) {
   1951             *ok = True;
   1952             return req->start;
   1953          } else {
   1954             *ok = False;
   1955             return 0;
   1956          }
   1957          break;
   1958       case MHint:
   1959          if (fixedIdx >= 0) {
   1960             *ok = True;
   1961             return req->start;
   1962          }
   1963          if (floatIdx >= 0) {
   1964             *ok = True;
   1965             return nsegments[floatIdx].start;
   1966          }
   1967          *ok = False;
   1968          return 0;
   1969       case MAny:
   1970          if (floatIdx >= 0) {
   1971             *ok = True;
   1972             return nsegments[floatIdx].start;
   1973          }
   1974          *ok = False;
   1975          return 0;
   1976       default:
   1977          break;
   1978    }
   1979 
   1980    /*NOTREACHED*/
   1981    ML_(am_barf)("getAdvisory: unknown request kind");
   1982    *ok = False;
   1983    return 0;
   1984 }
   1985 
   1986 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
   1987    fixed requests.  If start is zero, a floating request is issued; if
   1988    nonzero, a fixed request at that address is issued.  Same comments
   1989    about return values apply. */
   1990 
   1991 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
   1992                                           /*OUT*/Bool* ok )
   1993 {
   1994    MapRequest mreq;
   1995    mreq.rkind = start==0 ? MAny : MFixed;
   1996    mreq.start = start;
   1997    mreq.len   = len;
   1998    return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
   1999 }
   2000 
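        /* Illustrative sketch, not part of the build: the query-notify
           sequence, as a client mmap syscall wrapper might drive it.  First
           ask aspacem where the mapping may go, then perform the mapping
           with no notification, and finally tell aspacem what actually
           happened so that the segment array stays in sync with reality.
           The helper name and the simplified error handling are purely for
           illustration. */
        #if 0
        static SysRes example_client_anon_mmap ( SizeT len, UInt prot )
        {
           SysRes sres;
           Addr   advised;
           Bool   ok, needDiscard;

           len = VG_PGROUNDUP(len);

           /* (1) ask for an advisory for a floating client mapping. */
           advised = VG_(am_get_advisory_client_simple)( 0/*floating*/, len, &ok );
           if (!ok)
              return VG_(mk_SysRes_Error)( VKI_EINVAL );

           /* (2) hand the request to the kernel, without notification. */
           sres = VG_(am_do_mmap_NO_NOTIFY)(
                     advised, len, prot,
                     VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS, 0, 0 );
           if (sr_isError(sres))
              return sres;

           /* (3) record the outcome in the segment array. */
           needDiscard = VG_(am_notify_client_mmap)(
                            sr_Res(sres), len, prot,
                            VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS, 0, 0 );
           if (needDiscard) {
              /* the caller should also invalidate any cached translations
                 made from the affected range. */
           }
           return sres;
        }
        #endif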
   2001 
   2002 /* Notifies aspacem that the client completed an mmap successfully.
   2003    The segment array is updated accordingly.  If the returned Bool is
   2004    True, the caller should immediately discard translations from the
   2005    specified address range. */
   2006 
   2007 Bool
   2008 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
   2009                             Int fd, Off64T offset )
   2010 {
   2011    HChar    buf[VKI_PATH_MAX];
   2012    ULong    dev, ino;
   2013    UInt     mode;
   2014    NSegment seg;
   2015    Bool     needDiscard;
   2016 
   2017    aspacem_assert(len > 0);
   2018    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   2019    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2020    aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
   2021 
   2022    /* Discard is needed if any of the just-trashed range had T. */
   2023    needDiscard = any_Ts_in_range( a, len );
   2024 
   2025    init_nsegment( &seg );
   2026    seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
   2027    seg.start  = a;
   2028    seg.end    = a + len - 1;
   2029    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2030    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2031    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2032    if (!(flags & VKI_MAP_ANONYMOUS)) {
   2033       // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
   2034       seg.offset = offset;
   2035       if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2036          seg.dev = dev;
   2037          seg.ino = ino;
   2038          seg.mode = mode;
   2039       }
   2040       if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2041          seg.fnIdx = allocate_segname( buf );
   2042       }
   2043    }
   2044    add_segment( &seg );
   2045    AM_SANITY_CHECK;
   2046    return needDiscard;
   2047 }
   2048 
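        /* Variant of VG_(am_notify_client_mmap) for callers which have no
           file descriptor but already know the file name: the name is
           recorded directly and no dev/ino/mode information is gathered.
           Otherwise behaves identically. */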
   2049 Bool
   2050 VG_(am_notify_fake_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
   2051                             HChar* fileName, Off64T offset )
   2052 {
   2056    NSegment seg;
   2057    Bool     needDiscard;
   2058 
   2059    aspacem_assert(len > 0);
   2060    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   2061    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2062    aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
   2063 
   2064    /* Discard is needed if any of the just-trashed range had T. */
   2065    needDiscard = any_Ts_in_range( a, len );
   2066 
   2067    init_nsegment( &seg );
   2068    seg.kind   = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
   2069    seg.start  = a;
   2070    seg.end    = a + len - 1;
   2071    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2072    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2073    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2074    if (!(flags & VKI_MAP_ANONYMOUS)) {
   2075       // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
   2076       seg.offset = offset;
   2077       seg.fnIdx = allocate_segname( fileName );
   2078    }
   2079    add_segment( &seg );
   2080    AM_SANITY_CHECK;
   2081    return needDiscard;
   2082 }
   2083 
   2084 /* Notifies aspacem that the client completed a shmat successfully.
   2085    The segment array is updated accordingly.  If the returned Bool is
   2086    True, the caller should immediately discard translations from the
   2087    specified address range. */
   2088 
   2089 Bool
   2090 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
   2091 {
   2092    NSegment seg;
   2093    Bool     needDiscard;
   2094 
   2095    aspacem_assert(len > 0);
   2096    aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   2097    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2098 
   2099    /* Discard is needed if any of the just-trashed range had T. */
   2100    needDiscard = any_Ts_in_range( a, len );
   2101 
   2102    init_nsegment( &seg );
   2103    seg.kind   = SkShmC;
   2104    seg.start  = a;
   2105    seg.end    = a + len - 1;
   2106    seg.offset = 0;
   2107    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2108    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2109    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2110    add_segment( &seg );
   2111    AM_SANITY_CHECK;
   2112    return needDiscard;
   2113 }
   2114 
   2115 /* Notifies aspacem that an mprotect was completed successfully.  The
   2116    segment array is updated accordingly.  Note, as with
   2117    VG_(am_notify_munmap), it is not the job of this function to reject
   2118    stupid mprotects, for example the client doing mprotect of
   2119    non-client areas.  Such requests should be intercepted earlier, by
   2120    the syscall wrapper for mprotect.  This function merely records
   2121    whatever it is told.  If the returned Bool is True, the caller
   2122    should immediately discard translations from the specified address
   2123    range. */
   2124 
   2125 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
   2126 {
   2127    Int  i, iLo, iHi;
   2128    Bool newR, newW, newX, needDiscard;
   2129 
   2130    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2131    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2132 
   2133    if (len == 0)
   2134       return False;
   2135 
   2136    newR = toBool(prot & VKI_PROT_READ);
   2137    newW = toBool(prot & VKI_PROT_WRITE);
   2138    newX = toBool(prot & VKI_PROT_EXEC);
   2139 
   2140    /* Discard is needed if we're dumping X permission */
   2141    needDiscard = any_Ts_in_range( start, len ) && !newX;
   2142 
   2143    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
   2144 
   2145    iLo = find_nsegment_idx(start);
   2146    iHi = find_nsegment_idx(start + len - 1);
   2147 
   2148    for (i = iLo; i <= iHi; i++) {
   2149       /* Apply the permissions to all relevant segments. */
   2150       switch (nsegments[i].kind) {
   2151          case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
   2152             nsegments[i].hasR = newR;
   2153             nsegments[i].hasW = newW;
   2154             nsegments[i].hasX = newX;
   2155             aspacem_assert(sane_NSegment(&nsegments[i]));
   2156             break;
   2157          default:
   2158             break;
   2159       }
   2160    }
   2161 
   2162    /* Changing permissions could have made previously un-mergeable
   2163       segments mergeable.  Therefore have to re-preen them. */
   2164    (void)preen_nsegments();
   2165    AM_SANITY_CHECK;
   2166    return needDiscard;
   2167 }
   2168 
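        /* Illustrative sketch, not part of the build: how the returned Bool
           is intended to be used by the mprotect syscall wrapper.  A discard
           is only signalled when the range had translations made from it and
           the new protection lacks execute permission.  The helper name is
           illustrative. */
        #if 0
        static void example_after_client_mprotect ( Addr start, SizeT len, UInt prot )
        {
           if (VG_(am_notify_mprotect)( start, len, prot )) {
              /* the caller must invalidate any cached translations made
                 from [start, start+len). */
           }
        }
        #endif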
   2169 
   2170 /* Notifies aspacem that an munmap completed successfully.  The
   2171    segment array is updated accordingly.  As with
   2172    VG_(am_notify_mprotect), we merely record the given info, and don't
   2173    check it for sensibleness.  If the returned Bool is True, the
   2174    caller should immediately discard translations from the specified
   2175    address range. */
   2176 
   2177 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
   2178 {
   2179    NSegment seg;
   2180    Bool     needDiscard;
   2181    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2182    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2183 
   2184    if (len == 0)
   2185       return False;
   2186 
   2187    needDiscard = any_Ts_in_range( start, len );
   2188 
   2189    init_nsegment( &seg );
   2190    seg.start = start;
   2191    seg.end   = start + len - 1;
   2192 
   2193    /* The segment becomes unused (free).  Segments from above
   2194       aspacem_maxAddr were originally SkResvn and so we make them so
   2195       again.  Note, this isn't really right when the segment straddles
   2196       the aspacem_maxAddr boundary - then really it should be split in
   2197       two, the lower part marked as SkFree and the upper part as
   2198       SkResvn.  Ah well. */
   2199    if (start > aspacem_maxAddr
   2200        && /* check previous comparison is meaningful */
   2201           aspacem_maxAddr < Addr_MAX)
   2202       seg.kind = SkResvn;
   2203    else
   2204    /* Ditto for segments from below aspacem_minAddr. */
   2205    if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
   2206       seg.kind = SkResvn;
   2207    else
   2208       seg.kind = SkFree;
   2209 
   2210    add_segment( &seg );
   2211 
   2212    /* Unmapping could create two adjacent free segments, so a preen is
   2213       needed.  add_segment() will do that, so no need to here. */
   2214    AM_SANITY_CHECK;
   2215    return needDiscard;
   2216 }
   2217 
   2218 
   2219 /*-----------------------------------------------------------------*/
   2220 /*---                                                           ---*/
   2221 /*--- Handling mappings which do not arise directly from the    ---*/
   2222 /*--- simulation of the client.                                 ---*/
   2223 /*---                                                           ---*/
   2224 /*-----------------------------------------------------------------*/
   2225 
   2226 /* --- --- --- map, unmap, protect  --- --- --- */
   2227 
   2228 /* Map a file at a fixed address for the client, and update the
   2229    segment array accordingly. */
   2230 
   2231 SysRes VG_(am_mmap_file_fixed_client)
   2232      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
   2233 {
   2234    return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
   2235 }
   2236 
   2237 SysRes VG_(am_mmap_named_file_fixed_client)
   2238      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
   2239 {
   2240    SysRes     sres;
   2241    NSegment   seg;
   2242    Addr       advised;
   2243    Bool       ok;
   2244    MapRequest req;
   2245    ULong      dev, ino;
   2246    UInt       mode;
   2247    HChar      buf[VKI_PATH_MAX];
   2248 
   2249    /* Not allowable. */
   2250    if (length == 0
   2251        || !VG_IS_PAGE_ALIGNED(start)
   2252        || !VG_IS_PAGE_ALIGNED(offset))
   2253       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2254 
   2255    /* Ask for an advisory.  If it's negative, fail immediately. */
   2256    req.rkind = MFixed;
   2257    req.start = start;
   2258    req.len   = length;
   2259    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2260    if (!ok || advised != start)
   2261       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2262 
   2263    /* We have been advised that the mapping is allowable at the
   2264       specified address.  So hand it off to the kernel, and propagate
   2265       any resulting failure immediately. */
   2266    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2267    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2268              start, length, prot,
   2269              VKI_MAP_FIXED|VKI_MAP_PRIVATE,
   2270              fd, offset
   2271           );
   2272    if (sr_isError(sres))
   2273       return sres;
   2274 
   2275    if (sr_Res(sres) != start) {
   2276       /* I don't think this can happen.  It means the kernel made a
   2277          fixed map succeed but not at the requested location.  Try to
   2278          repair the damage, then return saying the mapping failed. */
   2279       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2280       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2281    }
   2282 
   2283    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2284    init_nsegment( &seg );
   2285    seg.kind   = SkFileC;
   2286    seg.start  = start;
   2287    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   2288    seg.offset = offset;
   2289    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2290    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2291    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2292    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2293       seg.dev = dev;
   2294       seg.ino = ino;
   2295       seg.mode = mode;
   2296    }
   2297    if (name) {
   2298       seg.fnIdx = allocate_segname( name );
   2299    } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2300       seg.fnIdx = allocate_segname( buf );
   2301    }
   2302    add_segment( &seg );
   2303 
   2304    AM_SANITY_CHECK;
   2305    return sres;
   2306 }
   2307 
   2308 
   2309 /* Map anonymously at a fixed address for the client, and update
   2310    the segment array accordingly. */
   2311 
   2312 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
   2313 {
   2314    SysRes     sres;
   2315    NSegment   seg;
   2316    Addr       advised;
   2317    Bool       ok;
   2318    MapRequest req;
   2319 
   2320    /* Not allowable. */
   2321    if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
   2322       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2323 
   2324    /* Ask for an advisory.  If it's negative, fail immediately. */
   2325    req.rkind = MFixed;
   2326    req.start = start;
   2327    req.len   = length;
   2328    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2329    if (!ok || advised != start)
   2330       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2331 
   2332    /* We have been advised that the mapping is allowable at the
   2333       specified address.  So hand it off to the kernel, and propagate
   2334       any resulting failure immediately. */
   2335    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2336    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2337              start, length, prot,
   2338              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2339              0, 0
   2340           );
   2341    if (sr_isError(sres))
   2342       return sres;
   2343 
   2344    if (sr_Res(sres) != start) {
   2345       /* I don't think this can happen.  It means the kernel made a
   2346          fixed map succeed but not at the requested location.  Try to
   2347          repair the damage, then return saying the mapping failed. */
   2348       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2349       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2350    }
   2351 
   2352    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2353    init_nsegment( &seg );
   2354    seg.kind  = SkAnonC;
   2355    seg.start = start;
   2356    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2357    seg.hasR  = toBool(prot & VKI_PROT_READ);
   2358    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   2359    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   2360    add_segment( &seg );
   2361 
   2362    AM_SANITY_CHECK;
   2363    return sres;
   2364 }
   2365 
   2366 
   2367 /* Map anonymously at an unconstrained address for the client, and
   2368    update the segment array accordingly.  */
   2369 
   2370 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
   2371 {
   2372    SysRes     sres;
   2373    NSegment   seg;
   2374    Addr       advised;
   2375    Bool       ok;
   2376    MapRequest req;
   2377 
   2378    /* Not allowable. */
   2379    if (length == 0)
   2380       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2381 
   2382    /* Ask for an advisory.  If it's negative, fail immediately. */
   2383    req.rkind = MAny;
   2384    req.start = 0;
   2385    req.len   = length;
   2386    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2387    if (!ok)
   2388       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2389 
   2390    /* We have been advised that the mapping is allowable at the
   2391       advised address.  So hand it off to the kernel, and propagate
   2392       any resulting failure immediately. */
   2393    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2394    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2395              advised, length, prot,
   2396              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2397              0, 0
   2398           );
   2399    if (sr_isError(sres))
   2400       return sres;
   2401 
   2402    if (sr_Res(sres) != advised) {
   2403       /* I don't think this can happen.  It means the kernel made a
   2404          fixed map succeed but not at the requested location.  Try to
   2405          repair the damage, then return saying the mapping failed. */
   2406       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2407       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2408    }
   2409 
   2410    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2411    init_nsegment( &seg );
   2412    seg.kind  = SkAnonC;
   2413    seg.start = advised;
   2414    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2415    seg.hasR  = toBool(prot & VKI_PROT_READ);
   2416    seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   2417    seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   2418    add_segment( &seg );
   2419 
   2420    AM_SANITY_CHECK;
   2421    return sres;
   2422 }
   2423 
   2424 
   2425 /* Similarly, acquire new address space for the client but with
   2426    considerable restrictions on what can be done with it: (1) the
   2427    actual protections may exceed those stated in 'prot', (2) the
   2428    area's protections cannot be later changed using any form of
   2429    mprotect, and (3) the area cannot be freed using any form of
   2430    munmap.  On Linux this behaves the same as
   2431    VG_(am_mmap_anon_float_client).  On AIX5 this *may* allocate memory
   2432    by using sbrk, so as to make use of large pages on AIX. */
   2433 
   2434 SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
   2435 {
   2436    return VG_(am_mmap_anon_float_client) ( length, prot );
   2437 }
   2438 
   2439 
   2440 /* Map anonymously at an unconstrained address for V, and update the
   2441    segment array accordingly.  This is fundamentally how V allocates
   2442    itself more address space when needed. */
   2443 
   2444 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
   2445 {
   2446    SysRes     sres;
   2447    NSegment   seg;
   2448    Addr       advised;
   2449    Bool       ok;
   2450    MapRequest req;
   2451 
   2452    /* Not allowable. */
   2453    if (length == 0)
   2454       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2455 
   2456    /* Ask for an advisory.  If it's negative, fail immediately. */
   2457    req.rkind = MAny;
   2458    req.start = 0;
   2459    req.len   = length;
   2460    advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
   2461    if (!ok)
   2462       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2463 
   2464 // On Darwin, for anonymous maps you can pass in a tag which is used by
   2465 // programs like vmmap for statistical purposes.
   2466 #ifndef VM_TAG_VALGRIND
   2467 #  define VM_TAG_VALGRIND 0
   2468 #endif
   2469 
   2470    /* We have been advised that the mapping is allowable at the
   2471       specified address.  So hand it off to the kernel, and propagate
   2472       any resulting failure immediately. */
   2473    /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
   2474       another thread can pre-empt our spot.  [At one point on the DARWIN
   2475       branch the VKI_MAP_FIXED was commented out;  unclear if this is
   2476       necessary or not given the second Darwin-only call that immediately
   2477       follows if this one fails.  --njn] */
   2478    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2479              advised, length,
   2480              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
   2481              VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2482              VM_TAG_VALGRIND, 0
   2483           );
   2484 #if defined(VGO_darwin)
   2485    if (sr_isError(sres)) {
   2486        /* try again, ignoring the advisory */
   2487        sres = VG_(am_do_mmap_NO_NOTIFY)(
   2488              0, length,
   2489              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
   2490              /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2491              VM_TAG_VALGRIND, 0
   2492           );
   2493    }
   2494 #endif
   2495    if (sr_isError(sres))
   2496       return sres;
   2497 
   2498 #if defined(VGO_linux)
   2499    if (sr_Res(sres) != advised) {
   2500       /* I don't think this can happen.  It means the kernel made a
   2501          fixed map succeed but not at the requested location.  Try to
   2502          repair the damage, then return saying the mapping failed. */
   2503       (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2504       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2505    }
   2506 #endif
   2507 
   2508    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2509    init_nsegment( &seg );
   2510    seg.kind  = SkAnonV;
   2511    seg.start = sr_Res(sres);
   2512    seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   2513    seg.hasR  = True;
   2514    seg.hasW  = True;
   2515    seg.hasX  = True;
   2516    add_segment( &seg );
   2517 
   2518    AM_SANITY_CHECK;
   2519    return sres;
   2520 }
   2521 
   2522 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
   2523 
   2524 void* VG_(am_shadow_alloc)(SizeT size)
   2525 {
   2526    SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
   2527    return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
   2528 }
   2529 
   2530 /* Same comments apply as per VG_(am_sbrk_anon_float_client).  On
   2531    Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
   2532 
   2533 SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
   2534 {
   2535    return VG_(am_mmap_anon_float_valgrind)( cszB );
   2536 }
   2537 
   2538 
   2539 /* Map a file at an unconstrained address for V, and update the
   2540    segment array accordingly.  Use the provided flags. */
   2542 
   2543 SysRes VG_(am_mmap_file_float_valgrind_flags) ( SizeT length, UInt prot,
   2544                                                 UInt flags,
   2545                                                 Int fd, Off64T offset )
   2546 {
   2547    SysRes     sres;
   2548    NSegment   seg;
   2549    Addr       advised;
   2550    Bool       ok;
   2551    MapRequest req;
   2552    ULong      dev, ino;
   2553    UInt       mode;
   2554    HChar      buf[VKI_PATH_MAX];
   2555 
   2556    /* Not allowable. */
   2557    if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
   2558       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2559 
   2560    /* Ask for an advisory.  If it's negative, fail immediately. */
   2561    req.rkind = MAny;
   2562    req.start = 0;
   2563    req.len   = length;
   2564    advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
   2565    if (!ok)
   2566       return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2567 
   2568    /* We have been advised that the mapping is allowable at the
   2569       specified address.  So hand it off to the kernel, and propagate
   2570       any resulting failure immediately. */
   2571    sres = VG_(am_do_mmap_NO_NOTIFY)(
   2572              advised, length, prot,
   2573              flags,
   2574              fd, offset
   2575           );
   2576    if (sr_isError(sres))
   2577       return sres;
   2578 
   2579    if (sr_Res(sres) != advised) {
   2580       /* I don't think this can happen.  It means the kernel made a
   2581          fixed map succeed but not at the requested location.  Try to
   2582          repair the damage, then return saying the mapping failed. */
   2583        /*TODO(kcc): it appears this may actually happen if allocating
   2584         in hugetlbfs. No idea why. */
   2585 //      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
   2586 //      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2587    }
   2588 
   2589    /* Ok, the mapping succeeded.  Now notify the interval map. */
   2590    init_nsegment( &seg );
   2591    seg.kind   = SkFileV;
   2592    seg.start  = sr_Res(sres);
   2593    seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   2594    seg.offset = offset;
   2595    seg.hasR   = toBool(prot & VKI_PROT_READ);
   2596    seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   2597    seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   2598    if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
   2599       seg.dev  = dev;
   2600       seg.ino  = ino;
   2601       seg.mode = mode;
   2602    }
   2603    if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
   2604       seg.fnIdx = allocate_segname( buf );
   2605    }
   2606    add_segment( &seg );
   2607 
   2608    AM_SANITY_CHECK;
   2609    return sres;
   2610 }
   2611 
   2612 /* Map privately a file at an unconstrained address for V, and update the
   2613    segment array accordingly.  This is used by V for transiently
   2614    mapping in object files to read their debug info.  */
   2615 
   2616 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
   2617                                           Int fd, Off64T offset )
   2618 {
   2619    return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
   2620                                                   VKI_MAP_FIXED|VKI_MAP_PRIVATE,
   2621                                                   fd, offset );
   2622 }
   2623 
   2624 extern SysRes VG_(am_shared_mmap_file_float_valgrind)
   2625    ( SizeT length, UInt prot, Int fd, Off64T offset )
   2626 {
   2627    return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
   2628                                                   VKI_MAP_FIXED|VKI_MAP_SHARED,
   2629                                                   fd, offset );
   2630 }
   2631 
   2632 /* --- --- munmap helper --- --- */
   2633 
   2634 static
   2635 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
   2636                             Addr start, SizeT len, Bool forClient )
   2637 {
   2638    Bool   d;
   2639    SysRes sres;
   2640 
   2641    if (!VG_IS_PAGE_ALIGNED(start))
   2642       goto eINVAL;
   2643 
   2644    if (len == 0) {
   2645       *need_discard = False;
   2646       return VG_(mk_SysRes_Success)( 0 );
   2647    }
   2648 
   2649    if (start + len < len)
   2650       goto eINVAL;
   2651 
   2652    len = VG_PGROUNDUP(len);
   2653    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2654    aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   2655 
   2656    if (forClient) {
   2657       if (!VG_(am_is_valid_for_client_or_free_or_resvn)
   2658             ( start, len, VKI_PROT_NONE ))
   2659          goto eINVAL;
   2660    } else {
   2661       if (!is_valid_for_valgrind( start, len ))
   2662          goto eINVAL;
   2663    }
   2664 
   2665    d = any_Ts_in_range( start, len );
   2666 
   2667    sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
   2668    if (sr_isError(sres))
   2669       return sres;
   2670 
   2671    VG_(am_notify_munmap)( start, len );
   2672    AM_SANITY_CHECK;
   2673    *need_discard = d;
   2674    return sres;
   2675 
   2676   eINVAL:
   2677    return VG_(mk_SysRes_Error)( VKI_EINVAL );
   2678 }
   2679 
   2680 /* Unmap the given address range and update the segment array
   2681    accordingly.  This fails if the range isn't valid for the client.
   2682    If *need_discard is True after a successful return, the caller
   2683    should immediately discard translations from the specified address
   2684    range. */
   2685 
   2686 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
   2687                               Addr start, SizeT len )
   2688 {
   2689    return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
   2690 }
   2691 
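        /* Illustrative sketch, not part of the build: how a munmap syscall
           wrapper might drive the call above.  The discard step is shown
           only as a comment since it lives outside this module; the helper
           name is illustrative. */
        #if 0
        static SysRes example_wrap_client_munmap ( Addr start, SizeT len )
        {
           Bool   need_discard;
           SysRes sres = VG_(am_munmap_client)( &need_discard, start, len );
           if (!sr_isError(sres) && need_discard) {
              /* translations made from [start, start+len) must now be
                 invalidated by the caller. */
           }
           return sres;
        }
        #endif
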
   2692 /* Unmap the given address range and update the segment array
   2693    accordingly.  This fails if the range isn't valid for valgrind. */
   2694 
   2695 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
   2696 {
   2697    Bool need_discard;
   2698    SysRes r = am_munmap_both_wrk( &need_discard,
   2699                                   start, len, False/*valgrind*/ );
   2700    /* If this assertion fails, it means we allowed translations to be
   2701       made from a V-owned section.  Which shouldn't happen. */
   2702    if (!sr_isError(r))
   2703       aspacem_assert(!need_discard);
   2704    return r;
   2705 }
   2706 
   2707 /* Let (start,len) denote an area within a single Valgrind-owned
   2708    segment (anon or file).  Change the ownership of [start, start+len)
   2709    to the client instead.  Fails if (start,len) does not denote a
   2710    suitable segment. */
   2711 
   2712 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
   2713 {
   2714    Int i, iLo, iHi;
   2715 
   2716    if (len == 0)
   2717       return True;
   2718    if (start + len < start)
   2719       return False;
   2720    if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
   2721       return False;
   2722 
   2723    i = find_nsegment_idx(start);
   2724    if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
   2725       return False;
   2726    if (start+len-1 > nsegments[i].end)
   2727       return False;
   2728 
   2729    aspacem_assert(start >= nsegments[i].start);
   2730    aspacem_assert(start+len-1 <= nsegments[i].end);
   2731 
   2732    /* This scheme is like how mprotect works: split the to-be-changed
   2733       range into its own segment(s), then mess with them (it).  There
   2734       should be only one. */
   2735    split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
   2736    aspacem_assert(iLo == iHi);
   2737    switch (nsegments[iLo].kind) {
   2738       case SkFileV: nsegments[iLo].kind = SkFileC; break;
   2739       case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
   2740       default: aspacem_assert(0); /* can't happen - guarded above */
   2741    }
   2742 
   2743    preen_nsegments();
   2744    return True;
   2745 }
   2746 
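        /* Illustrative sketch, not part of the build: one typical use of the
           ownership transfer above is to let V create a mapping on the
           client's behalf and then hand the pages over, so that aspacem
           treats them as client memory from then on.  The helper name is
           illustrative. */
        #if 0
        static Addr example_give_anon_pages_to_client ( SizeT len )
        {
           SysRes sres;
           len = VG_PGROUNDUP(len);
           sres = VG_(am_mmap_anon_float_valgrind)( len );
           if (sr_isError(sres))
              return 0;
           if (!VG_(am_change_ownership_v_to_c)( sr_Res(sres), len ))
              return 0;
           return sr_Res(sres);
        }
        #endif
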
   2747 /* 'seg' must be NULL or have been obtained from
   2748    VG_(am_find_nsegment), and still valid.  If non-NULL, and if it
   2749    denotes a SkAnonC (anonymous client mapping) area, set the .isCH
   2750    (is-client-heap) flag for that area.  Otherwise do nothing.
   2751    (Bizarre interface so that the same code works for both Linux and
   2752    AIX and does not impose inefficiencies on the Linux version.) */
   2753 void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
   2754 {
   2755    Int i = segAddr_to_index( seg );
   2756    aspacem_assert(i >= 0 && i < nsegments_used);
   2757    if (nsegments[i].kind == SkAnonC) {
   2758       nsegments[i].isCH = True;
   2759    } else {
   2760       aspacem_assert(nsegments[i].isCH == False);
   2761    }
   2762 }
   2763 
   2764 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
   2765    segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
   2766    segment. */
   2767 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
   2768 {
   2769    Int i = segAddr_to_index( seg );
   2770    aspacem_assert(i >= 0 && i < nsegments_used);
   2771    if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
   2772       nsegments[i].hasT = True;
   2773    }
   2774 }
   2775 
   2776 
   2777 /* --- --- --- reservations --- --- --- */
   2778 
   2779 /* Create a reservation from START .. START+LENGTH-1, with the given
   2780    ShrinkMode.  When checking whether the reservation can be created,
   2781    also ensure that at least abs(EXTRA) extra free bytes will remain
   2782    above (> 0) or below (< 0) the reservation.
   2783 
   2784    The reservation will only be created if it, plus the extra-zone,
   2785    falls entirely within a single free segment.  The returned Bool
   2786    indicates whether the creation succeeded. */
   2787 
   2788 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
   2789                                   ShrinkMode smode, SSizeT extra )
   2790 {
   2791    Int      startI, endI;
   2792    NSegment seg;
   2793 
   2794    /* start and end, not taking into account the extra space. */
   2795    Addr start1 = start;
   2796    Addr end1   = start + length - 1;
   2797 
   2798    /* start and end, taking into account the extra space. */
   2799    Addr start2 = start1;
   2800    Addr end2   = end1;
   2801 
   2802    if (extra < 0) start2 += extra; // this moves it down :-)
   2803    if (extra > 0) end2 += extra;
   2804 
   2805    aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   2806    aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
   2807    aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
   2808    aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
   2809 
   2810    startI = find_nsegment_idx( start2 );
   2811    endI = find_nsegment_idx( end2 );
   2812 
   2813    /* If the start and end points don't fall within the same (free)
   2814       segment, we're hosed.  This does rely on the assumption that all
   2815       mergeable adjacent segments can be merged, but add_segment()
   2816       should ensure that. */
   2817    if (startI != endI)
   2818       return False;
   2819 
   2820    if (nsegments[startI].kind != SkFree)
   2821       return False;
   2822 
   2823    /* Looks good - make the reservation. */
   2824    aspacem_assert(nsegments[startI].start <= start2);
   2825    aspacem_assert(end2 <= nsegments[startI].end);
   2826 
   2827    init_nsegment( &seg );
   2828    seg.kind  = SkResvn;
   2829    seg.start = start1;  /* NB: extra space is not included in the
   2830                            reservation. */
   2831    seg.end   = end1;
   2832    seg.smode = smode;
   2833    add_segment( &seg );
   2834 
   2835    AM_SANITY_CHECK;
   2836    return True;
   2837 }
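
        /* Illustrative sketch (not part of the original sources): create a
           one-page reservation at a hypothetical address while additionally
           requiring 16 free pages immediately below it.  The extra zone is
           only checked for freeness; it does not become part of the SkResvn
           segment. */
        #if 0
           Addr rsvn_at = 0x50000000;   /* hypothetical, page aligned */
           Bool ok = VG_(am_create_reservation)(
                        rsvn_at, VKI_PAGE_SIZE, SmLower,
                        -16 * (SSizeT)VKI_PAGE_SIZE /* extra space below */ );
           /* on success, only [rsvn_at, rsvn_at + VKI_PAGE_SIZE - 1] is
              marked SkResvn */
        #endif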
   2838 
   2839 
   2840 /* Let SEG be an anonymous client mapping.  This fn extends the
   2841    mapping by DELTA bytes, taking the space from a reservation section
   2842    which must be adjacent.  If DELTA is positive, the segment is
   2843    extended forwards in the address space, and the reservation must be
   2844    the next one along.  If DELTA is negative, the segment is extended
   2845    backwards in the address space and the reservation must be the
   2846    previous one.  DELTA must be page aligned.  abs(DELTA) must not
   2847    exceed the size of the reservation segment minus one page, that is,
   2848    the reservation segment after the operation must be at least one
   2849    page long. */
   2850 
   2851 Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
   2852                                                        SSizeT    delta )
   2853 {
   2854    Int    segA, segR;
   2855    UInt   prot;
   2856    SysRes sres;
   2857 
   2858    /* Find the segment array index for SEG.  If the assertion fails it
   2859       probably means you passed in a bogus SEG. */
   2860    segA = segAddr_to_index( seg );
   2861    aspacem_assert(segA >= 0 && segA < nsegments_used);
   2862 
   2863    if (nsegments[segA].kind != SkAnonC)
   2864       return False;
   2865 
   2866    if (delta == 0)
   2867       return True;
   2868 
   2869    prot =   (nsegments[segA].hasR ? VKI_PROT_READ : 0)
   2870           | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
   2871           | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
   2872 
   2873    aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
   2874 
   2875    if (delta > 0) {
   2876 
   2877       /* Extending the segment forwards. */
   2878       segR = segA+1;
   2879       if (segR >= nsegments_used
   2880           || nsegments[segR].kind != SkResvn
   2881           || nsegments[segR].smode != SmLower
   2882           || nsegments[segR].start != nsegments[segA].end + 1
   2883           || delta + VKI_PAGE_SIZE
   2884                 > (nsegments[segR].end - nsegments[segR].start + 1))
   2885         return False;
   2886 
   2887       /* Extend the kernel's mapping. */
   2888       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2889       sres = VG_(am_do_mmap_NO_NOTIFY)(
   2890                 nsegments[segR].start, delta,
   2891                 prot,
   2892                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2893                 0, 0
   2894              );
   2895       if (sr_isError(sres))
   2896          return False; /* kernel bug if this happens? */
   2897       if (sr_Res(sres) != nsegments[segR].start) {
   2898          /* kernel bug if this happens? */
   2899         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
   2900         return False;
   2901       }
   2902 
   2903       /* Ok, success with the kernel.  Update our structures. */
   2904       nsegments[segR].start += delta;
   2905       nsegments[segA].end += delta;
   2906       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
   2907 
   2908    } else {
   2909 
   2910       /* Extending the segment backwards. */
   2911       delta = -delta;
   2912       aspacem_assert(delta > 0);
   2913 
   2914       segR = segA-1;
   2915       if (segR < 0
   2916           || nsegments[segR].kind != SkResvn
   2917           || nsegments[segR].smode != SmUpper
   2918           || nsegments[segR].end + 1 != nsegments[segA].start
   2919           || delta + VKI_PAGE_SIZE
   2920                 > (nsegments[segR].end - nsegments[segR].start + 1))
   2921         return False;
   2922 
   2923       /* Extend the kernel's mapping. */
   2924       // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   2925       sres = VG_(am_do_mmap_NO_NOTIFY)(
   2926                 nsegments[segA].start-delta, delta,
   2927                 prot,
   2928                 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
   2929                 0, 0
   2930              );
   2931       if (sr_isError(sres))
   2932          return False; /* kernel bug if this happens? */
   2933       if (sr_Res(sres) != nsegments[segA].start-delta) {
   2934          /* kernel bug if this happens? */
   2935         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
   2936         return False;
   2937       }
   2938 
   2939       /* Ok, success with the kernel.  Update our structures. */
   2940       nsegments[segR].end -= delta;
   2941       nsegments[segA].start -= delta;
   2942       aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
   2943 
   2944    }
   2945 
   2946    AM_SANITY_CHECK;
   2947    return True;
   2948 }
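
        /* Worked example (illustrative only, not in the original sources).
           Suppose segA is a SkAnonC segment [A, B] and segR is the SkResvn
           segment [B+1, C] immediately above it, with smode == SmLower.
           Then a call with a page-aligned delta D > 0, where
           D + VKI_PAGE_SIZE <= C - B, mmaps [B+1, B+D] with segA's
           permissions and updates the segment array to
               segA: [A, B+D]      segR: [B+D+1, C]
           i.e. the reservation shrinks from its lower end but always keeps
           at least one page. */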
   2949 
   2950 
   2951 /* --- --- --- resizing/move a mapping --- --- --- */
   2952 
   2953 #if HAVE_MREMAP
   2954 
   2955 /* Let SEG be a client mapping (anonymous or file).  This fn extends
   2956    the mapping forwards only by DELTA bytes, and trashes whatever was
   2957    in the new area.  Fails if SEG is not a single client mapping or if
   2958    the new area is not accessible to the client.  Fails if DELTA is
   2959    not page aligned.  *seg is invalid after a successful return.  If
   2960    *need_discard is True after a successful return, the caller should
   2961    immediately discard translations from the new area. */
   2962 
   2963 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
   2964                                 NSegment* seg, SizeT delta )
   2965 {
   2966    Addr     xStart;
   2967    SysRes   sres;
   2968    NSegment seg_copy = *seg;
   2969    SizeT    seg_old_len = seg->end + 1 - seg->start;
   2970 
   2971    if (0)
   2972       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
   2973 
   2974    if (seg->kind != SkFileC && seg->kind != SkAnonC)
   2975       return False;
   2976 
   2977    if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
   2978       return False;
   2979 
   2980    xStart = seg->end+1;
   2981    if (xStart + delta < delta)
   2982       return False;
   2983 
   2984    if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
   2985                                                       VKI_PROT_NONE ))
   2986       return False;
   2987 
   2988    AM_SANITY_CHECK;
   2989    sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
   2990                                                seg_old_len,
   2991                                                seg_old_len + delta );
   2992    if (sr_isError(sres)) {
   2993       AM_SANITY_CHECK;
   2994       return False;
   2995    } else {
   2996       /* the area must not have moved */
   2997       aspacem_assert(sr_Res(sres) == seg->start);
   2998    }
   2999 
   3000    *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
   3001 
   3002    seg_copy.end += delta;
   3003    add_segment( &seg_copy );
   3004 
   3005    if (0)
   3006       VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
   3007 
   3008    AM_SANITY_CHECK;
   3009    return True;
   3010 }
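
        /* Illustrative sketch (not part of the original sources): growing a
           client mapping in place and honouring the discard contract
           described above.  'seg' stands for a pointer previously obtained
           from VG_(am_find_nsegment). */
        #if 0
           Bool need_discard;
           if (VG_(am_extend_map_client)( &need_discard, seg, VKI_PAGE_SIZE )) {
              /* 'seg' is no longer valid here; re-look it up if needed */
              if (need_discard) {
                 /* discard translations from the newly mapped page */
              }
           }
        #endif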
   3011 
   3012 
   3013 /* Remap the old address range to the new address range.  Fails if any
   3014    parameter is not page aligned, if either size is zero, if any
   3015    wraparound is implied, if the old address range does not fall
   3016    entirely within a single segment, if the new address range overlaps
   3017    with the old one, or if the old address range is not a valid client
   3018    mapping.  If *need_discard is True after a successful return, the
   3019    caller should immediately discard translations from both specified
   3020    address ranges.  */
   3021 
   3022 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
   3023                                         Addr old_addr, SizeT old_len,
   3024                                         Addr new_addr, SizeT new_len )
   3025 {
   3026    Int      iLo, iHi;
   3027    SysRes   sres;
   3028    NSegment seg;
   3029 
   3030    if (old_len == 0 || new_len == 0)
   3031       return False;
   3032 
   3033    if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
   3034        || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
   3035       return False;
   3036 
   3037    if (old_addr + old_len < old_addr
   3038        || new_addr + new_len < new_addr)
   3039       return False;
   3040 
   3041    if (old_addr + old_len - 1 < new_addr
   3042        || new_addr + new_len - 1 < old_addr) {
   3043       /* no overlap */
   3044    } else
   3045       return False;
   3046 
   3047    iLo = find_nsegment_idx( old_addr );
   3048    iHi = find_nsegment_idx( old_addr + old_len - 1 );
   3049    if (iLo != iHi)
   3050       return False;
   3051 
   3052    if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
   3053       return False;
   3054 
   3055    sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
   3056              ( old_addr, old_len, new_addr, new_len );
   3057    if (sr_isError(sres)) {
   3058       AM_SANITY_CHECK;
   3059       return False;
   3060    } else {
   3061       aspacem_assert(sr_Res(sres) == new_addr);
   3062    }
   3063 
   3064    *need_discard = any_Ts_in_range( old_addr, old_len )
   3065                    || any_Ts_in_range( new_addr, new_len );
   3066 
   3067    seg = nsegments[iLo];
   3068 
   3069    /* Mark the new area based on the old seg. */
   3070    if (seg.kind == SkFileC) {
   3071       seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
   3072    } else {
   3073       aspacem_assert(seg.kind == SkAnonC);
   3074       aspacem_assert(seg.offset == 0);
   3075    }
   3076    seg.start = new_addr;
   3077    seg.end   = new_addr + new_len - 1;
   3078    add_segment( &seg );
   3079 
   3080    /* Create a free hole in the old location. */
   3081    init_nsegment( &seg );
   3082    seg.start = old_addr;
   3083    seg.end   = old_addr + old_len - 1;
   3084    /* See comments in VG_(am_notify_munmap) about this SkResvn vs
   3085       SkFree thing. */
   3086    if (old_addr > aspacem_maxAddr
   3087        && /* check previous comparison is meaningful */
   3088           aspacem_maxAddr < Addr_MAX)
   3089       seg.kind = SkResvn;
   3090    else
   3091       seg.kind = SkFree;
   3092 
   3093    add_segment( &seg );
   3094 
   3095    AM_SANITY_CHECK;
   3096    return True;
   3097 }
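
        /* Illustrative sketch (not part of the original sources): moving a
           client mapping to a non-overlapping place and honouring the
           discard contract described above.  old_addr/old_len/new_addr/
           new_len stand for hypothetical page-aligned, non-overlapping
           ranges. */
        #if 0
           Bool need_discard;
           if (VG_(am_relocate_nooverlap_client)( &need_discard,
                                                  old_addr, old_len,
                                                  new_addr, new_len )
               && need_discard) {
              /* discard translations from both the old and new ranges */
           }
        #endif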
   3098 
   3099 #endif // HAVE_MREMAP
   3100 
   3101 
   3102 #if defined(VGO_linux)
   3103 
   3104 /*-----------------------------------------------------------------*/
   3105 /*---                                                           ---*/
   3106 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
   3107 /*--- Almost completely independent of the stuff above.  The    ---*/
   3108 /*--- only function it 'exports' to the code above this comment ---*/
   3109 /*--- is parse_procselfmaps.                                    ---*/
   3110 /*---                                                           ---*/
   3111 /*-----------------------------------------------------------------*/
   3112 
   3113 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
   3114 
   3115 /* Size of a smallish table used to read /proc/self/maps entries. */
   3116 #define M_PROCMAP_BUF 100000
   3117 
   3118 /* static ... to keep it out of the stack frame. */
   3119 static Char procmap_buf[M_PROCMAP_BUF];
   3120 
   3121 /* Records length of /proc/self/maps read into procmap_buf. */
   3122 static Int  buf_n_tot;
   3123 
   3124 /* Helper fns. */
   3125 
   3126 static Int hexdigit ( Char c )
   3127 {
   3128    if (c >= '0' && c <= '9') return (Int)(c - '0');
   3129    if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
   3130    if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
   3131    return -1;
   3132 }
   3133 
   3134 static Int decdigit ( Char c )
   3135 {
   3136    if (c >= '0' && c <= '9') return (Int)(c - '0');
   3137    return -1;
   3138 }
   3139 
   3140 static Int readchar ( const Char* buf, Char* ch )
   3141 {
   3142    if (*buf == 0) return 0;
   3143    *ch = *buf;
   3144    return 1;
   3145 }
   3146 
   3147 static Int readhex ( const Char* buf, UWord* val )
   3148 {
   3149    /* Read a word-sized hex number. */
   3150    Int n = 0;
   3151    *val = 0;
   3152    while (hexdigit(*buf) >= 0) {
   3153       *val = (*val << 4) + hexdigit(*buf);
   3154       n++; buf++;
   3155    }
   3156    return n;
   3157 }
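        /* For example (illustrative only): given buf == "0804a000-...",
           readhex consumes the eight leading hex digits, sets *val to
           0x0804a000 and returns 8; parsing then resumes at the '-'. */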
   3158 
   3159 static Int readhex64 ( const Char* buf, ULong* val )
   3160 {
   3161    /* Read a potentially 64-bit hex number. */
   3162    Int n = 0;
   3163    *val = 0;
   3164    while (hexdigit(*buf) >= 0) {
   3165       *val = (*val << 4) + hexdigit(*buf);
   3166       n++; buf++;
   3167    }
   3168    return n;
   3169 }
   3170 
   3171 static Int readdec64 ( const Char* buf, ULong* val )
   3172 {
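           /* Read a potentially 64-bit decimal number. */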
   3173    Int n = 0;
   3174    *val = 0;
   3175    while (decdigit(*buf) >= 0) {
   3176       *val = (*val * 10) + decdigit(*buf);
   3177       n++; buf++;
   3178    }
   3179    return n;
   3180 }
   3181 
   3182 
   3183 /* Get the contents of /proc/self/maps into a static buffer.  If it
   3184    won't fit, or if any other failure occurs, just abort.  (Syntax
   3185    errors are dealt with later, by the parser itself.) */
   3186 
   3187 static void read_procselfmaps_into_buf ( void )
   3188 {
   3189    Int    n_chunk;
   3190    SysRes fd;
   3191 
   3192    /* Read the initial memory mapping from the /proc filesystem. */
   3193    fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
   3194    if (sr_isError(fd))
   3195       ML_(am_barf)("can't open /proc/self/maps");
   3196 
   3197    buf_n_tot = 0;
   3198    do {
   3199       n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
   3200                               M_PROCMAP_BUF - buf_n_tot );
   3201       if (n_chunk >= 0)
   3202          buf_n_tot += n_chunk;
   3203    } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
   3204 
   3205    ML_(am_close)(sr_Res(fd));
   3206 
   3207    if (buf_n_tot >= M_PROCMAP_BUF-5)
   3208       ML_(am_barf_toolow)("M_PROCMAP_BUF");
   3209    if (buf_n_tot == 0)
   3210       ML_(am_barf)("I/O error on /proc/self/maps");
   3211 
   3212    procmap_buf[buf_n_tot] = 0;
   3213 }
   3214 
   3215 /* Parse /proc/self/maps.  For each map entry, call
   3216    record_mapping, passing it, in this order:
   3217 
   3218       start address in memory
   3219       length
   3220       page protections (using the VKI_PROT_* flags)
   3221       mapped file device and inode
   3222       offset in file, or zero if no file
   3223       filename, zero terminated, or NULL if no file
   3224 
   3225    So the sig of the called fn is
   3226 
   3227       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3228                               ULong dev, ULong ino, Off64T offset,
   3229                               const UChar* filename )
   3230 
   3231    Note that the supplied filename is transiently stored; record_mapping
   3232    should make a copy if it wants to keep it.
   3233 
   3234    Nb: it is important that this function does not alter the contents of
   3235        procmap_buf!
   3236 */
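
        /* An illustrative example (the filename is made up): a maps line
           such as

              08048000-08056000 r-xp 00000000 03:0c 64593     /bin/foo

           would be reported as

              record_mapping( 0x08048000, 0xE000,
                              VKI_PROT_READ|VKI_PROT_EXEC,
                              0x30C, 64593, 0, "/bin/foo" );

           where 0x30C is the device number rebuilt from "03:0c" using the
           "new" encoding scheme described inside parse_procselfmaps below. */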
   3237 static void parse_procselfmaps (
   3238       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3239                               ULong dev, ULong ino, Off64T offset,
   3240                               const UChar* filename ),
   3241       void (*record_gap)( Addr addr, SizeT len )
   3242    )
   3243 {
   3244    Int    i, j, i_eol;
   3245    Addr   start, endPlusOne, gapStart;
   3246    UChar* filename;
   3247    UChar  rr, ww, xx, pp, ch, tmp;
   3248    UInt   prot;
   3249    UWord  maj, min;
   3250    ULong  foffset, dev, ino;
   3251 
   3252    foffset = ino = 0; /* keep gcc-4.1.0 happy */
   3253 
   3254    read_procselfmaps_into_buf();
   3255 
   3256    aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
   3257 
   3258    if (0)
   3259       VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
   3260 
   3261    /* Ok, it's safely aboard.  Parse the entries. */
   3262    i = 0;
   3263    gapStart = Addr_MIN;
   3264    while (True) {
   3265       if (i >= buf_n_tot) break;
   3266 
   3267       /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
   3268       j = readhex(&procmap_buf[i], &start);
   3269       if (j > 0) i += j; else goto syntaxerror;
   3270       j = readchar(&procmap_buf[i], &ch);
   3271       if (j == 1 && ch == '-') i += j; else goto syntaxerror;
   3272       j = readhex(&procmap_buf[i], &endPlusOne);
   3273       if (j > 0) i += j; else goto syntaxerror;
   3274 
   3275       j = readchar(&procmap_buf[i], &ch);
   3276       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3277 
   3278       j = readchar(&procmap_buf[i], &rr);
   3279       if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
   3280       j = readchar(&procmap_buf[i], &ww);
   3281       if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
   3282       j = readchar(&procmap_buf[i], &xx);
   3283       if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
   3284       /* This field is the shared/private flag */
   3285       j = readchar(&procmap_buf[i], &pp);
   3286       if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
   3287                                               i += j; else goto syntaxerror;
   3288 
   3289       j = readchar(&procmap_buf[i], &ch);
   3290       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3291 
   3292       j = readhex64(&procmap_buf[i], &foffset);
   3293       if (j > 0) i += j; else goto syntaxerror;
   3294 
   3295       j = readchar(&procmap_buf[i], &ch);
   3296       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3297 
   3298       j = readhex(&procmap_buf[i], &maj);
   3299       if (j > 0) i += j; else goto syntaxerror;
   3300       j = readchar(&procmap_buf[i], &ch);
   3301       if (j == 1 && ch == ':') i += j; else goto syntaxerror;
   3302       j = readhex(&procmap_buf[i], &min);
   3303       if (j > 0) i += j; else goto syntaxerror;
   3304 
   3305       j = readchar(&procmap_buf[i], &ch);
   3306       if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
   3307 
   3308       j = readdec64(&procmap_buf[i], &ino);
   3309       if (j > 0) i += j; else goto syntaxerror;
   3310 
   3311       goto read_line_ok;
   3312 
   3313     syntaxerror:
   3314       VG_(debugLog)(0, "Valgrind:",
   3315                        "FATAL: syntax error reading /proc/self/maps\n");
   3316       { Int k, m;
   3317         HChar buf50[51];
   3318         m = 0;
   3319         buf50[m] = 0;
   3320         k = i - 50;
   3321         if (k < 0) k = 0;
   3322         for (; k <= i; k++) {
   3323            buf50[m] = procmap_buf[k];
   3324            buf50[m+1] = 0;
   3325            if (m < 50-1) m++;
   3326         }
   3327         VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
   3328       }
   3329       ML_(am_exit)(1);
   3330 
   3331     read_line_ok:
   3332 
   3333       /* Try to find the name of the file mapped to this segment, if
   3334          it exists.  Note that filenames can contain spaces. */
   3335 
   3336       // Move i to the next non-space char, which should be either a '/' or
   3337       // a newline.
   3338       while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
   3339 
   3340       // Move i_eol to the end of the line.
   3341       i_eol = i;
   3342       while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
   3343 
   3344       // If there's a filename...
   3345       if (i < i_eol-1 && procmap_buf[i] == '/') {
   3346          /* Minor hack: put a '\0' at the filename end for the call to
   3347             'record_mapping', then restore the old char with 'tmp'. */
   3348          filename = &procmap_buf[i];
   3349          tmp = filename[i_eol - i];
   3350          filename[i_eol - i] = '\0';
   3351       } else {
   3352          tmp = 0;
   3353          filename = NULL;
   3354          foffset = 0;
   3355       }
   3356 
   3357       prot = 0;
   3358       if (rr == 'r') prot |= VKI_PROT_READ;
   3359       if (ww == 'w') prot |= VKI_PROT_WRITE;
   3360       if (xx == 'x') prot |= VKI_PROT_EXEC;
   3361 
   3362       /* Linux has two ways to encode a device number when it
   3363          is exposed to user space (via fstat etc). The old way
   3364          is the traditional unix scheme that produces a 16 bit
   3365          device number with the top 8 being the major number and
   3366          the bottom 8 the minor number.
   3367 
   3368          The new scheme allows for a 12 bit major number and
   3369          a 20 bit minor number by using a 32 bit device number
   3370          and putting the top 12 bits of the minor number into
   3371          the top 12 bits of the device number thus leaving an
   3372          extra 4 bits for the major number.
   3373 
   3374          If the minor and major number are both single byte
   3375          values then both schemes give the same result so we
   3376          use the new scheme here in case either number is
   3377          outside the 0-255 range and then use fstat64 when
   3378          available (or fstat on 64 bit systems) so that we
   3379          should always have a new style device number and
   3380          everything should match. */
   3381       dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
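              /* e.g. (illustrative only): maj == 0x08, min == 0x102 gives
                 dev == 0x02 | 0x800 | (0x100 << 12) == 0x100802, which the
                 old 8+8-bit scheme could not have represented at all. */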
   3382 
   3383       if (record_gap && gapStart < start)
   3384          (*record_gap) ( gapStart, start-gapStart );
   3385 
   3386       if (record_mapping && start < endPlusOne)
   3387          (*record_mapping) ( start, endPlusOne-start,
   3388                              prot, dev, ino,
   3389                              foffset, filename );
   3390 
   3391       if ('\0' != tmp) {
   3392          filename[i_eol - i] = tmp;
   3393       }
   3394 
   3395       i = i_eol + 1;
   3396       gapStart = endPlusOne;
   3397    }
   3398 
   3399 #  if defined(VGP_arm_linux)
   3400    /* ARM puts code at the end of memory that contains processor
   3401       specific stuff (cmpxchg, getting the thread local storage, etc.)
   3402       This isn't specified in /proc/self/maps, so do it here.  This
   3403       kludgery causes the view of memory, as presented to
   3404       record_gap/record_mapping, to actually reflect reality.  IMO
   3405       (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
   3406       the commpage should be regarded as a bug in the kernel. */
   3407    { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
   3408      const Addr commpage_end1  = ARM_LINUX_FAKE_COMMPAGE_END1;
   3409      if (gapStart < commpage_start) {
   3410         if (record_gap)
   3411            (*record_gap)( gapStart, commpage_start - gapStart );
   3412         if (record_mapping)
   3413            (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
   3414                               VKI_PROT_READ|VKI_PROT_EXEC,
   3415                               0/*dev*/, 0/*ino*/, 0/*foffset*/,
   3416                               NULL);
   3417         gapStart = commpage_end1;
   3418      }
   3419    }
   3420 #  endif
   3421 
   3422    if (record_gap && gapStart < Addr_MAX)
   3423       (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
   3424 }
   3425 
   3426 /*------END-procmaps-parser-for-Linux----------------------------*/
   3427 
   3428 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
   3429 
   3430 #elif defined(VGO_darwin)
   3431 #include <mach/mach.h>
   3432 #include <mach/mach_vm.h>
   3433 
   3434 static unsigned int mach2vki(unsigned int vm_prot)
   3435 {
   3436    return
   3437       ((vm_prot & VM_PROT_READ)    ? VKI_PROT_READ    : 0) |
   3438       ((vm_prot & VM_PROT_WRITE)   ? VKI_PROT_WRITE   : 0) |
   3439       ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC    : 0) ;
   3440 }
   3441 
   3442 static UInt stats_machcalls = 0;
   3443 
   3444 static void parse_procselfmaps (
   3445       void (*record_mapping)( Addr addr, SizeT len, UInt prot,
   3446                               ULong dev, ULong ino, Off64T offset,
   3447                               const UChar* filename ),
   3448       void (*record_gap)( Addr addr, SizeT len )
   3449    )
   3450 {
   3451    vm_address_t iter;
   3452    unsigned int depth;
   3453    vm_address_t last;
   3454 
   3455    iter = 0;
   3456    depth = 0;
   3457    last = 0;
   3458    while (1) {
   3459       mach_vm_address_t addr = iter;
   3460       mach_vm_size_t size;
   3461       vm_region_submap_short_info_data_64_t info;
   3462       kern_return_t kr;
   3463 
   3464       while (1) {
   3465          mach_msg_type_number_t info_count
   3466             = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
   3467          stats_machcalls++;
   3468          kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
   3469                                      (vm_region_info_t)&info, &info_count);
   3470          if (kr)
   3471             return;
   3472          if (info.is_submap) {
   3473             depth++;
   3474             continue;
   3475          }
   3476          break;
   3477       }
   3478       iter = addr + size;
   3479 
   3480       if (addr > last  &&  record_gap) {
   3481          (*record_gap)(last, addr - last);
   3482       }
   3483       if (record_mapping) {
   3484          (*record_mapping)(addr, size, mach2vki(info.protection),
   3485                            0, 0, info.offset, NULL);
   3486       }
   3487       last = addr + size;
   3488    }
   3489 
   3490    if ((Addr)-1 > last  &&  record_gap)
   3491       (*record_gap)(last, (Addr)-1 - last);
   3492 }
   3493 
   3494 // Urr.  So much for thread safety.
   3495 static Bool        css_overflowed;
   3496 static ChangedSeg* css_local;
   3497 static Int         css_size_local;
   3498 static Int         css_used_local;
   3499 
   3500 static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
   3501                                  ULong dev, ULong ino, Off64T offset,
   3502                                  const UChar *filename)
   3503 {
   3504    // derived from sync_check_mapping_callback()
   3505 
   3506    Int iLo, iHi, i;
   3507 
   3508    if (len == 0) return;
   3509 
   3510    /* The kernel should not give us wraparounds. */
   3511    aspacem_assert(addr <= addr + len - 1);
   3512 
   3513    iLo = find_nsegment_idx( addr );
   3514    iHi = find_nsegment_idx( addr + len - 1 );
   3515 
   3516 
   3517    /* NSegments iLo .. iHi inclusive should agree with the presented
   3518       data. */
   3519    for (i = iLo; i <= iHi; i++) {
   3520 
   3521       UInt seg_prot;
   3522 
   3523       if (nsegments[i].kind == SkAnonV  ||  nsegments[i].kind == SkFileV) {
   3524          /* Ignore V regions */
   3525          continue;
   3526       }
   3527       else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
   3528          /* Add a mapping for SkFree or SkResvn regions */
   3529          ChangedSeg* cs = &css_local[css_used_local];
   3530          if (css_used_local < css_size_local) {
   3531             cs->is_added = True;
   3532             cs->start    = addr;
   3533             cs->end      = addr + len - 1;
   3534             cs->prot     = prot;
   3535             cs->offset   = offset;
   3536             css_used_local++;
   3537          } else {
   3538             css_overflowed = True;
   3539          }
   3540          return;
   3541 
   3542       } else if (nsegments[i].kind == SkAnonC ||
   3543                  nsegments[i].kind == SkFileC ||
   3544                  nsegments[i].kind == SkShmC)
   3545       {
   3546          /* Check permissions on client regions */
   3547          // GrP fixme
   3548          seg_prot = 0;
   3549          if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
   3550          if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
   3551 #        if defined(VGA_x86)
   3552          // GrP fixme sloppyXcheck
   3553          // darwin: kernel X ignored and spuriously changes? (vm_copy)
   3554          seg_prot |= (prot & VKI_PROT_EXEC);
   3555 #        else
   3556          if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
   3557 #        endif
   3558          if (seg_prot != prot) {
   3559              if (VG_(clo_trace_syscalls))
   3560                  VG_(debugLog)(0,"aspacem","region %p..%p permission "
   3561                                  "mismatch (kernel %x, V %x)\n",
   3562                                  (void*)nsegments[i].start,
   3563                                  (void*)(nsegments[i].end+1), prot, seg_prot);
   3564          }
   3565 
   3566       } else {
   3567          aspacem_assert(0);
   3568       }
   3569    }
   3570 }
   3571 
   3572 static void remove_mapping_callback(Addr addr, SizeT len)
   3573 {
   3574    // derived from sync_check_gap_callback()
   3575 
   3576    Int iLo, iHi, i;
   3577 
   3578    if (len == 0)
   3579       return;
   3580 
   3581    /* The kernel should not give us wraparounds. */
   3582    aspacem_assert(addr <= addr + len - 1);
   3583 
   3584    iLo = find_nsegment_idx( addr );
   3585    iHi = find_nsegment_idx( addr + len - 1 );
   3586 
   3587    /* NSegments iLo .. iHi inclusive should agree with the presented data. */
   3588    for (i = iLo; i <= iHi; i++) {
   3589       if (nsegments[i].kind != SkFree  &&  nsegments[i].kind != SkResvn) {
   3590          // V has a mapping, kernel doesn't
   3591          ChangedSeg* cs = &css_local[css_used_local];
   3592          if (css_used_local < css_size_local) {
   3593             cs->is_added = False;
   3594             cs->start    = nsegments[i].start;
   3595             cs->end      = nsegments[i].end;
   3596             cs->prot     = 0;
   3597             cs->offset   = 0;
   3598             css_used_local++;
   3599          } else {
   3600             css_overflowed = True;
   3601          }
   3602          return;
   3603       }
   3604    }
   3605 }
   3606 
   3607 
   3608 // Returns False if 'css' wasn't big enough.
   3609 Bool VG_(get_changed_segments)(
   3610       const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
   3611       Int css_size, /*OUT*/Int* css_used)
   3612 {
   3613    static UInt stats_synccalls = 1;
   3614    aspacem_assert(when && where);
   3615 
   3616    if (0)
   3617       VG_(debugLog)(0,"aspacem",
   3618          "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
   3619          stats_synccalls++, stats_machcalls, when, where
   3620       );
   3621 
   3622    css_overflowed = False;
   3623    css_local = css;
   3624    css_size_local = css_size;
   3625    css_used_local = 0;
   3626 
   3627    // Get the list of segs that need to be added/removed.
   3628    parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
   3629 
   3630    *css_used = css_used_local;
   3631 
   3632    if (css_overflowed) {
   3633       aspacem_assert(css_used_local == css_size_local);
   3634    }
   3635 
   3636    return !css_overflowed;
   3637 }
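
        /* Illustrative sketch (not part of the original sources): a caller
           supplies its own ChangedSeg array and detects overflow.  The
           'when'/'where' strings and the array size are hypothetical. */
        #if 0
           ChangedSeg css[100];
           Int        n_css = 0;
           if (! VG_(get_changed_segments)( "after syscall", "example",
                                            css, 100, &n_css )) {
              /* css was too small: only css[0 .. n_css-1] were filled in,
                 and further changes were dropped */
           }
        #endif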
   3638 
   3639 #endif // defined(VGO_darwin)
   3640 
   3641 /*------END-procmaps-parser-for-Darwin---------------------------*/
   3642 
   3643 #endif // defined(VGO_linux) || defined(VGO_darwin)
   3644 
   3645 /*--------------------------------------------------------------------*/
   3646 /*--- end                                                          ---*/
   3647 /*--------------------------------------------------------------------*/
   3648