
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2011 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list
   - the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks) are linked
   into the big blocks freed list.
   This allows a client to allocate and free big blocks (e.g. bigger
   than MC_(clo_freelist_vol)) without immediately losing all
   protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};
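
/* Example (illustrative sketch, not part of the implementation): these
   queues are what let Memcheck report a use after free, provided the
   block has not yet been physically released.  A client doing

      char* p = malloc(100);
      free(p);
      char c = p[10];   // reported while the block is still queued

   keeps its freed blocks quarantined subject to the volume limits set
   by --freelist-vol and --freelist-big-blocks. */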

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block would be
      released immediately anyway, in which case we put it at the head
      of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l]    = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l]       = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;
         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */
         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         VG_(free) ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk*  mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_MALLOC_REDZONE_SZB ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}
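
/* For instance, a client call malloc((size_t)-1) arrives here with
   sizeB == (SizeT)-1; the cast to SSizeT makes it negative, so the
   wrapper prints "Warning: silly arg (-1) to malloc()" and returns
   NULL rather than forwarding the request to the allocator. */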

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
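
/* Note on the p != NULL path above: it serves custom allocators.  The
   client has already obtained the memory itself and merely describes
   it to Memcheck (e.g. via the VALGRIND_MALLOCLIKE_BLOCK request or
   the mempool requests handled further down), so no VG_(cli_malloc)
   call is made and the chunk is recorded as MC_AllocCustom. */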

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}
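
/* Caveat on MC_(calloc): the product nmemb*size1 is computed in SizeT
   arithmetic, so two large positive arguments that pass the negativity
   check can still wrap.  E.g. on a 32-bit platform
   calloc(0x10000, 0x10001) gives nmemb*size1 == 0x10000 after
   wrap-around, and the block handed back is far smaller than the
   client asked for. */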

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks freed by the client. */
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue). */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                                        ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess)        ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                                                new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}
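
/* Example (illustrative): because this realloc always moves the block
   and retires the old chunk via die_and_free_mem, a client doing

      int* p = malloc(8 * sizeof(int));
      int* q = realloc(p, 16 * sizeof(int));
      p[0] = 1;   // reported: p points into a queued, freed block

   gets an error on the stale pointer even in cases where a C library
   could have resized the block in place. */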

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
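
/* Example (sketch): a custom allocator that resizes a block without
   moving it reports the change with the client request

      VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSize, newSize, rzB);

   On growth the tail [addr+oldSize, addr+newSize) becomes undefined;
   on shrinkage the abandoned tail plus redzone becomes noaccess, as
   implemented above. */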


/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->pool field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
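
/* Example (sketch; 'pool' and 'obj' are illustrative client addresses):
   the usual lifecycle, driven from the client via the requests in
   valgrind.h, is

      VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed);
      VALGRIND_MEMPOOL_ALLOC(pool, obj, size);   // -> MC_(mempool_alloc)
      VALGRIND_MEMPOOL_FREE(pool, obj);          // -> MC_(mempool_free)
      VALGRIND_DESTROY_MEMPOOL(pool);

   where 'pool' is any address the client uses to identify the pool,
   typically its anchor structure. */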

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}

static Int
mp_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(chunks[i]->where);
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         /* min/lo are the smaller/larger of the two start addresses. */
         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         /* max/hi are the larger/smaller of the two end addresses:
            the chunk's end is mc->data + mc->szB, the trim extent's
            end is addr + szB. */
         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
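
/* Example (sketch): a client that wants to discard everything in the
   pool outside [base, base+keep) announces it with

      VALGRIND_MEMPOOL_TRIM(pool, base, keep);

   Chunks wholly inside the extent survive, chunks wholly outside are
   freed, and straddling chunks are cut down to the intersection, with
   the cut-off parts marked noaccess as above. */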

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}
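
/* Example (sketch): if the anchor structure identifying the pool is
   itself reallocated, the client re-keys the pool with

      VALGRIND_MOVE_MEMPOOL(poolA, poolB);

   where poolA is the old anchor address and poolB the new one. */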

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
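
/* Example (sketch): when a pool allocator relocates or resizes a live
   object, e.g. during compaction, the client keeps Memcheck's metadata
   in step with

      VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, newSize);

   Note this only re-describes the chunk; unlike MC_(realloc) above it
   does not alter the shadow state of the payload. */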

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool*  mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
       return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
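
/* The summary printed above looks like this (figures illustrative):

      HEAP SUMMARY:
          in use at exit: 1,240 bytes in 3 blocks
        total heap usage: 10 allocs, 7 frees, 4,096 bytes allocated
*/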

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/