
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2010 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing. */
static MC_Chunk* freed_list_start  = NULL;
static MC_Chunk* freed_list_end    = NULL;
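
/* Freed blocks are not handed back to the low-level allocator
   immediately: they sit on this FIFO queue so that accesses made
   through dangling pointers can still be detected for a while.  The
   queue's total volume is bounded by MC_(clo_freelist_vol)
   (--freelist-vol=). */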

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      tl_assert(freed_list_start == NULL);
      freed_list_end    = freed_list_start = mc;
      VG_(free_queue_volume) = (Long)mc->szB;
   } else {
      tl_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end       = mc;
      VG_(free_queue_volume) += (Long)mc->szB;
      if (show)
         VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                     VG_(free_queue_volume));
   }
   VG_(free_queue_length)++;
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below MC_(clo_freelist_vol). */

   while (VG_(free_queue_volume) > MC_(clo_freelist_vol)) {
      MC_Chunk* mc1;

      tl_assert(freed_list_start != NULL);
      tl_assert(freed_list_end != NULL);

      mc1 = freed_list_start;
      VG_(free_queue_volume) -= (Long)mc1->szB;
      VG_(free_queue_length)--;
      if (show)
         VG_(printf)("mc_freelist: discard: volume now %lld\n",
                     VG_(free_queue_volume));
      tl_assert(VG_(free_queue_volume) >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = mc1->next;
      }
      mc1->next = NULL; /* just paranoia */

      /* free MC_Chunk */
      if (MC_AllocCustom != mc1->allockind) {
         VG_(cli_free) ( (void*)(mc1->data) );
      }
      VG_(free) ( mc1 );
   }
}
    130 
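/* Return the head (i.e. the oldest entry) of the freed-blocks queue;
   exported for use elsewhere in Memcheck. */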
MC_Chunk* MC_(get_freed_list_head)(void)
{
   return freed_list_start;
}

/* Allocate a shadow MC_Chunk describing the client block at [p, p+szB).
   The caller adds it to the appropriate hash table. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
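   // For instance, malloc(-1) on a 32-bit target arrives here as
   // sizeB == 0xFFFFFFFF, which reads as -1 once cast to SSizeT, so it
   // is rejected rather than being passed on to the allocator.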
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}

/* Allocate a block for the client (unless p is already non-zero, as
   happens for MC_AllocCustom/mempool allocations), record a shadow
   chunk for it in 'table', and set its addressability/definedness
   state.  Also update the allocation stats. */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

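/* Note: the nmemb*size1 product below is evaluated in (unsigned) SizeT
   arithmetic, so it can wrap for very large arguments;
   complain_about_silly_args2 only rejects values whose signed reading
   is negative, not the wrapped product itself. */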
void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* Check whether this is a matching free() / delete / delete []. */
   if (MC_AllocMalloc != mc->allockind) {
      /* cannot realloc a block that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue). */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                                        ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                                                new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}
    569 
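/* Ordering used by VG_(ssort) in check_mempool_sane: compare two
   MC_Chunk*s by the client address of the block each describes. */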
static Int
mp_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

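   /* At verbosity > 1, every 10000th call reports how many pools and
      chunks are currently live. */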
   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(chunks[i]->where);
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

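         /* Compute the union [min, max) and the intersection [lo, hi)
            of the chunk and the trim extent; [lo, hi) is the part of
            the chunk that survives the trim. */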
         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool*  mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
       return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
    929