
/*--------------------------------------------------------------------*/
/*--- A header file for all parts of the MemCheck tool.            ---*/
/*---                                                 mc_include.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward (at) acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __MC_INCLUDE_H
#define __MC_INCLUDE_H

#define MC_(str)    VGAPPEND(vgMemCheck_,str)


/* This is a private header file for use only within the
   memcheck/ directory. */

/*------------------------------------------------------------*/
/*--- Tracking the heap                                    ---*/
/*------------------------------------------------------------*/

/* By default, we want at least a 16B redzone on client heap blocks
   for Memcheck.
   The default can be modified by --redzone-size. */
#define MC_MALLOC_DEFAULT_REDZONE_SZB    16
// effective redzone, as (possibly) modified by --redzone-size:
extern SizeT MC_(Malloc_Redzone_SzB);

/* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
typedef
   enum {
      MC_AllocMalloc = 0,
      MC_AllocNew    = 1,
      MC_AllocNewVec = 2,
      MC_AllocCustom = 3
   }
   MC_AllocKind;

/* This describes a heap block. Nb: first two fields must match core's
 * VgHashNode. */
typedef
   struct _MC_Chunk {
      struct _MC_Chunk* next;
      Addr         data;            // Address of the actual block.
      SizeT        szB : (sizeof(SizeT)*8)-2; // Size requested; 30 or 62 bits.
      MC_AllocKind allockind : 2;   // Which operation did the allocation.
      ExeContext*  where[0];
      /* Variable-length array. The size depends on MC_(clo_keep_stacktraces).
         This array optionally stores the alloc and/or free stack trace. */
   }
   MC_Chunk;

/* Returns the ExeContext where the MC_Chunk was allocated/freed.
   Returns VG_(null_ExeContext)() if the ExeContext has not been recorded
   (due to MC_(clo_keep_stacktraces) and/or because the block has not yet
   been freed). */
ExeContext* MC_(allocated_at) (MC_Chunk*);
ExeContext* MC_(freed_at) (MC_Chunk*);

/* Records and sets the ExeContext according to MC_(clo_keep_stacktraces). */
void  MC_(set_allocated_at) (ThreadId, MC_Chunk*);
void  MC_(set_freed_at) (ThreadId, MC_Chunk*);

/* Number of 'where' pointers needed according to MC_(clo_keep_stacktraces). */
UInt MC_(n_where_pointers) (void);
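
/* Illustrative sketch only (not the actual code in mc_malloc_wrappers.c):
   how the variable-length 'where[]' tail relates to MC_(n_where_pointers).
   The locals below (p, reqSzB, tid) are hypothetical, and a plain
   VG_(malloc) stands in for the pool allocator the tool really uses.

      SizeT     need = sizeof(MC_Chunk)
                       + MC_(n_where_pointers)() * sizeof(ExeContext*);
      MC_Chunk* mc   = VG_(malloc)("example.cc", need);
      mc->data       = p;
      mc->szB        = reqSzB;
      mc->allockind  = MC_AllocMalloc;
      MC_(set_allocated_at)(tid, mc);  // fills where[] per clo_keep_stacktraces
*/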

/* Memory pool.  Nb: first two fields must match core's VgHashNode. */
typedef
   struct _MC_Mempool {
      struct _MC_Mempool* next;
      Addr          pool;           // pool identifier
      SizeT         rzB;            // pool red-zone size
      Bool          is_zeroed;      // allocations from this pool are zeroed
      Bool          auto_free;      // deallocating a block frees all chunks in it
      Bool          metapool;       // These chunks are VALGRIND_MALLOC_LIKE
                                    // memory, and are used as a pool.
      VgHashTable  *chunks;         // chunks associated with this pool
   }
   MC_Mempool;


void* MC_(new_block)  ( ThreadId tid,
                        Addr p, SizeT size, SizeT align,
                        Bool is_zeroed, MC_AllocKind kind,
                        VgHashTable *table);
void MC_(handle_free) ( ThreadId tid,
                        Addr p, UInt rzB, MC_AllocKind kind );

void MC_(create_mempool)  ( Addr pool, UInt rzB, Bool is_zeroed,
                            Bool auto_free, Bool metapool );
void MC_(destroy_mempool) ( Addr pool );
void MC_(mempool_alloc)   ( ThreadId tid, Addr pool,
                            Addr addr, SizeT size );
void MC_(mempool_free)    ( Addr pool, Addr addr );
void MC_(mempool_trim)    ( Addr pool, Addr addr, SizeT size );
void MC_(move_mempool)    ( Addr poolA, Addr poolB );
void MC_(mempool_change)  ( Addr pool, Addr addrA, Addr addrB, SizeT size );
Bool MC_(mempool_exists)  ( Addr pool );
Bool MC_(is_mempool_block)( MC_Chunk* mc_search );

/* Searches for a recently freed block which might bracket Addr a.
   Returns the MC_Chunk* for this block or NULL if no bracketing block
   is found. */
MC_Chunk* MC_(get_freed_block_bracketting)( Addr a );

/* For efficient pooled alloc/free of the MC_Chunk. */
extern PoolAlloc* MC_(chunk_poolalloc);

/* For tracking malloc'd blocks.  Nb: it's quite important that it's a
   VgHashTable, because VgHashTable allows duplicate keys without complaint.
   This can occur if a user marks a malloc() block as also a custom block with
   MALLOCLIKE_BLOCK. */
extern VgHashTable *MC_(malloc_list);

/* For tracking memory pools. */
extern VgHashTable *MC_(mempool_list);

/* Shadow memory functions */
Bool MC_(check_mem_is_noaccess)( Addr a, SizeT len, Addr* bad_addr );
void MC_(make_mem_noaccess)        ( Addr a, SizeT len );
void MC_(make_mem_undefined_w_otag)( Addr a, SizeT len, UInt otag );
void MC_(make_mem_defined)         ( Addr a, SizeT len );
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len );

void MC_(xtmemory_report) ( const HChar* filename, Bool fini );

void MC_(print_malloc_stats) ( void );
/* nr of free operations done */
SizeT MC_(get_cmalloc_n_frees) ( void );

void* MC_(malloc)               ( ThreadId tid, SizeT n );
void* MC_(__builtin_new)        ( ThreadId tid, SizeT n );
void* MC_(__builtin_vec_new)    ( ThreadId tid, SizeT n );
void* MC_(memalign)             ( ThreadId tid, SizeT align, SizeT n );
void* MC_(calloc)               ( ThreadId tid, SizeT nmemb, SizeT size1 );
void  MC_(free)                 ( ThreadId tid, void* p );
void  MC_(__builtin_delete)     ( ThreadId tid, void* p );
void  MC_(__builtin_vec_delete) ( ThreadId tid, void* p );
void* MC_(realloc)              ( ThreadId tid, void* p, SizeT new_size );
SizeT MC_(malloc_usable_size)   ( ThreadId tid, void* p );

void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB);


/*------------------------------------------------------------*/
/*--- Origin tracking translate-time support               ---*/
/*------------------------------------------------------------*/

/* See detailed comments in mc_machine.c. */
Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB );
IRType MC_(get_otrack_reg_array_equiv_int_type) ( IRRegArray* arr );

/* Constants which are used as the lowest 2 bits in origin tags.

   An origin tag comprises an upper 30-bit ECU field and a lower 2-bit
   'kind' field.  The ECU field is a number given out by m_execontext
   and has a 1-1 mapping with ExeContext*s.  An ECU can be used
   directly as an origin tag (otag), but in fact we want to put
   additional information in the 'kind' field to indicate roughly where
   the tag came from.  This helps print more understandable error
   messages for the user -- it has no other purpose.

   Hence the following 2-bit constants are needed for the 'kind' field.

   To summarise:

   * Both ECUs and origin tags are represented as 32-bit words.

   * m_execontext and the core-tool interface deal purely in ECUs.
     They have no knowledge of origin tags -- that is a purely
     Memcheck-internal matter.

   * All valid ECUs have the lowest 2 bits zero and at least
     one of the upper 30 bits nonzero (see VG_(is_plausible_ECU)).

   * To convert from an ECU to an otag, OR in one of the MC_OKIND_
     constants below.

   * To convert an otag back to an ECU, AND it with ~3.
*/

#define MC_OKIND_UNKNOWN  0  /* unknown origin */
#define MC_OKIND_HEAP     1  /* this is a heap origin */
#define MC_OKIND_STACK    2  /* this is a stack origin */
#define MC_OKIND_USER     3  /* arises from user-supplied client req */
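
/* Illustrative sketch of the conversions described above (hypothetical
   locals; not part of the API):

      UInt ecu  = ...;                    // from m_execontext; low 2 bits are 0
      UInt otag = ecu | MC_OKIND_HEAP;    // ECU -> otag: OR in a kind constant
      UInt kind = otag & 3;               // extract the 2-bit 'kind' field
      UInt back = otag & ~3;              // otag -> ECU: mask the kind off
*/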


/*------------------------------------------------------------*/
/*--- Profiling of memory events                           ---*/
/*------------------------------------------------------------*/

/* Define to collect detailed performance info. */
/* #define MC_PROFILE_MEMORY */
#ifdef MC_PROFILE_MEMORY

/* Order of enumerators does not matter. But MCPE_LAST has to be the
   last entry in the list as it is used as an array bound. */
enum {
   MCPE_LOADV8,
   MCPE_LOADV8_SLOW1,
   MCPE_LOADV8_SLOW2,
   MCPE_LOADV16,
   MCPE_LOADV16_SLOW1,
   MCPE_LOADV16_SLOW2,
   MCPE_LOADV32,
   MCPE_LOADV32_SLOW1,
   MCPE_LOADV32_SLOW2,
   MCPE_LOADV64,
   MCPE_LOADV64_SLOW1,
   MCPE_LOADV64_SLOW2,
   MCPE_LOADV_128_OR_256,
   MCPE_LOADV_128_OR_256_SLOW_LOOP,
   MCPE_LOADV_128_OR_256_SLOW1,
   MCPE_LOADV_128_OR_256_SLOW2,
   MCPE_LOADVN_SLOW,
   MCPE_LOADVN_SLOW_LOOP,
   MCPE_STOREV8,
   MCPE_STOREV8_SLOW1,
   MCPE_STOREV8_SLOW2,
   MCPE_STOREV8_SLOW3,
   MCPE_STOREV8_SLOW4,
   MCPE_STOREV16,
   MCPE_STOREV16_SLOW1,
   MCPE_STOREV16_SLOW2,
   MCPE_STOREV16_SLOW3,
   MCPE_STOREV16_SLOW4,
   MCPE_STOREV32,
   MCPE_STOREV32_SLOW1,
   MCPE_STOREV32_SLOW2,
   MCPE_STOREV32_SLOW3,
   MCPE_STOREV32_SLOW4,
   MCPE_STOREV64,
   MCPE_STOREV64_SLOW1,
   MCPE_STOREV64_SLOW2,
   MCPE_STOREV64_SLOW3,
   MCPE_STOREV64_SLOW4,
   MCPE_STOREVN_SLOW,
   MCPE_STOREVN_SLOW_LOOP,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW,
   MCPE_MAKE_MEM_NOACCESS,
   MCPE_MAKE_MEM_UNDEFINED,
   MCPE_MAKE_MEM_UNDEFINED_W_OTAG,
   MCPE_MAKE_MEM_DEFINED,
   MCPE_CHEAP_SANITY_CHECK,
   MCPE_EXPENSIVE_SANITY_CHECK,
   MCPE_COPY_ADDRESS_RANGE_STATE,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2,
   MCPE_CHECK_MEM_IS_NOACCESS,
   MCPE_CHECK_MEM_IS_NOACCESS_LOOP,
   MCPE_IS_MEM_ADDRESSABLE,
   MCPE_IS_MEM_ADDRESSABLE_LOOP,
   MCPE_IS_MEM_DEFINED,
   MCPE_IS_MEM_DEFINED_LOOP,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP,
   MCPE_IS_DEFINED_ASCIIZ,
   MCPE_IS_DEFINED_ASCIIZ_LOOP,
   MCPE_FIND_CHUNK_FOR_OLD,
   MCPE_FIND_CHUNK_FOR_OLD_LOOP,
   MCPE_SET_ADDRESS_RANGE_PERMS,
   MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM,
   MCPE_NEW_MEM_STACK,
   MCPE_NEW_MEM_STACK_4,
   MCPE_NEW_MEM_STACK_8,
   MCPE_NEW_MEM_STACK_12,
   MCPE_NEW_MEM_STACK_16,
   MCPE_NEW_MEM_STACK_32,
   MCPE_NEW_MEM_STACK_112,
   MCPE_NEW_MEM_STACK_128,
   MCPE_NEW_MEM_STACK_144,
   MCPE_NEW_MEM_STACK_160,
   MCPE_DIE_MEM_STACK,
   MCPE_DIE_MEM_STACK_4,
   MCPE_DIE_MEM_STACK_8,
   MCPE_DIE_MEM_STACK_12,
   MCPE_DIE_MEM_STACK_16,
   MCPE_DIE_MEM_STACK_32,
   MCPE_DIE_MEM_STACK_112,
   MCPE_DIE_MEM_STACK_128,
   MCPE_DIE_MEM_STACK_144,
   MCPE_DIE_MEM_STACK_160,
   MCPE_MAKE_STACK_UNINIT_W_O,
   MCPE_MAKE_STACK_UNINIT_NO_O,
   MCPE_MAKE_STACK_UNINIT_128_NO_O,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_16,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_8,
   MCPE_MAKE_STACK_UNINIT_128_NO_O_SLOWCASE,
   /* Do not add enumerators past this line. */
   MCPE_LAST
};

extern ULong MC_(event_ctr)[MCPE_LAST];

#  define PROF_EVENT(ev)                           \
   do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST);  \
      MC_(event_ctr)[ev]++;                        \
   } while (False);
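
/* Typical use, e.g. at the top of a fast-path handler (illustrative):

      PROF_EVENT(MCPE_LOADV32);

   The per-event counts accumulate in MC_(event_ctr) for later reporting. */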

#else

#  define PROF_EVENT(ev)    /* */

#endif   /* MC_PROFILE_MEMORY */


/*------------------------------------------------------------*/
/*--- V and A bits (Victoria & Albert ?)                   ---*/
/*------------------------------------------------------------*/

/* The number of entries in the primary map can be altered.  However
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */
#define SM_SIZE 65536            /* DO NOT CHANGE */
#define SM_MASK (SM_SIZE-1)      /* DO NOT CHANGE */

#define V_BIT_DEFINED         0
#define V_BIT_UNDEFINED       1

#define V_BITS8_DEFINED       0
#define V_BITS8_UNDEFINED     0xFF

#define V_BITS16_DEFINED      0
#define V_BITS16_UNDEFINED    0xFFFF

#define V_BITS32_DEFINED      0
#define V_BITS32_UNDEFINED    0xFFFFFFFF

#define V_BITS64_DEFINED      0ULL
#define V_BITS64_UNDEFINED    0xFFFFFFFFFFFFFFFFULL
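
/* To recap the convention: one V ("validity") bit per data bit, with 1
   meaning "undefined".  For example (illustrative), a 32-bit value whose
   low 16 bits are defined but whose high 16 bits are not has V bits
   0xFFFF0000, while a fully defined word has V bits V_BITS32_DEFINED
   (all zeroes) and a fully undefined one has V_BITS32_UNDEFINED. */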


/*------------------------------------------------------------*/
/*--- Leak checking                                        ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      // Nb: the order is important -- it dictates the order of loss records
      // of equal sizes.
      Reachable    =0,  // Definitely reachable from root-set.
      Possible     =1,  // Possibly reachable from root-set;  involves at
                        //   least one interior-pointer along the way.
      IndirectLeak =2,  // Leaked, but reachable from another leaked block
                        //   (be it Unreached or IndirectLeak).
      Unreached    =3   // Not reached, i.e. leaked.
                        //   (At best, only reachable from itself via a cycle.)
  }
  Reachedness;

// Build a mask to check or set membership of Reachedness r in a set.
#define R2S(r) (1 << (r))
// Is Reachedness r a member of the set s?
#define RiS(r,s) ((s) & R2S(r))
// Returns a set containing all Reachedness values.
UInt MC_(all_Reachedness)(void);
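
/* Illustrative use of the set macros above (hypothetical variable names):

      UInt show = R2S(Possible) | R2S(Unreached);   // build a Reachedness set
      if (RiS(Unreached, show)) { ... }             // test membership
      UInt all  = MC_(all_Reachedness)();           // set holding every kind
*/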

/* For VALGRIND_COUNT_LEAKS client request */
extern SizeT MC_(bytes_leaked);
extern SizeT MC_(bytes_indirect);
extern SizeT MC_(bytes_dubious);
extern SizeT MC_(bytes_reachable);
extern SizeT MC_(bytes_suppressed);

/* For VALGRIND_COUNT_LEAK_BLOCKS client request */
extern SizeT MC_(blocks_leaked);
extern SizeT MC_(blocks_indirect);
extern SizeT MC_(blocks_dubious);
extern SizeT MC_(blocks_reachable);
extern SizeT MC_(blocks_suppressed);

typedef
   enum {
      LC_Off,
      LC_Summary,
      LC_Full,
   }
   LeakCheckMode;

typedef
   enum {
      LCD_Any,       // output all loss records, whatever the delta
      LCD_Increased, // output loss records with an increase in size or blocks
      LCD_Changed,   // output loss records with an increase or
                     // decrease in size or blocks
   }
   LeakCheckDeltaMode;

/* When a LossRecord is put into an OSet, these elements represent the key. */
typedef
   struct _LossRecordKey {
      Reachedness  state;        // LC_Extra.state value shared by all blocks.
      ExeContext*  allocated_at; // Where they were allocated.
   }
   LossRecordKey;

/* A loss record, used for generating err msgs.  Multiple leaked blocks can be
 * merged into a single loss record if they have the same state and similar
 * enough allocation points (controlled by --leak-resolution). */
typedef
   struct _LossRecord {
      LossRecordKey key;  // Key, when used in an OSet.
      SizeT szB;          // Sum of all MC_Chunk.szB values.
      SizeT indirect_szB; // Sum of all LC_Extra.indirect_szB values.
      UInt  num_blocks;   // Number of blocks represented by the record.
      SizeT old_szB;          // old_* values are the values found during the
      SizeT old_indirect_szB; // previous leak search. old_* values are used to
      UInt  old_num_blocks;   // output only the changed/new loss records
   }
   LossRecord;

typedef
   struct _LeakCheckParams {
      LeakCheckMode mode;
      UInt show_leak_kinds;
      UInt errors_for_leak_kinds;
      UInt heuristics;
      LeakCheckDeltaMode deltamode;
      UInt max_loss_records_output; // limit on the nr of loss records output.
      Bool requested_by_monitor_command; // True when requested by gdb/vgdb.
      const HChar* xt_filename; // if != NULL, produce an xtree leak file.
   }
   LeakCheckParams;

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams * lcp);

// Each time a leak search is done, the leak search generation
// MC_(leak_search_gen) is incremented.
extern UInt MC_(leak_search_gen);

// maintains the lcp.deltamode given in the last call to detect_memory_leaks
extern LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);

// Prints the list of blocks corresponding to the given loss_record_nr slice
// (from/to), up to a maximum of max_blocks.
// Returns True if loss_record_nr_from identifies a valid loss record
// from the last leak search, returns False otherwise.
// Note that loss_record_nr_to can be bigger than the number of loss records.
// All loss records after 'from' will then be examined and maybe printed.
// If heuristics != 0, print only the loss records/blocks found via
// one of the heuristics in the set.
Bool MC_(print_block_list) ( UInt loss_record_nr_from, UInt loss_record_nr_to,
                             UInt max_blocks, UInt heuristics);

// Prints the addresses/registers/... at which a pointer to
// the given range [address, address+szB[ is found.
void MC_(who_points_at) ( Addr address, SizeT szB);

// If delta_mode == LCD_Any, prints an empty string in buf;
// otherwise prints a delta in the layout " (+%'lu)" or " (-%'lu)".
extern HChar * MC_(snprintf_delta) (HChar * buf, Int size,
                                    SizeT current_val, SizeT old_val,
                                    LeakCheckDeltaMode delta_mode);


Bool MC_(is_valid_aligned_word)     ( Addr a );
Bool MC_(is_within_valid_secondary) ( Addr a );

// Prints, as a user msg, a description of the given loss record.
void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l);


/*------------------------------------------------------------*/
/*--- Errors and suppressions                              ---*/
/*------------------------------------------------------------*/

/* Did we show the user any errors for which an uninitialised value
   origin could have been collected (but wasn't)?  If yes, then, at the
   end of the run, print a one-line message advising that a rerun with
   --track-origins=yes might help. */
extern Bool MC_(any_value_errors);

/* Standard functions for errors and suppressions as required by the
   core/tool interface */
Bool MC_(eq_Error)           ( VgRes res, const Error* e1, const Error* e2 );
void MC_(before_pp_Error)    ( const Error* err );
void MC_(pp_Error)           ( const Error* err );
UInt MC_(update_Error_extra) ( const Error* err );

Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su );

Bool MC_(read_extra_suppression_info) ( Int fd, HChar** buf,
                                        SizeT* nBuf, Int* lineno, Supp *su );

Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su );

SizeT MC_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf );
SizeT MC_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf );
void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su );

const HChar* MC_(get_error_name) ( const Error* err );

/* Recording of errors */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite );
void MC_(record_cond_error)    ( ThreadId tid, UInt otag );
void MC_(record_value_error)   ( ThreadId tid, Int szB, UInt otag );
void MC_(record_jump_error)    ( ThreadId tid, Addr a );

void MC_(record_free_error)            ( ThreadId tid, Addr a );
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a );
void MC_(record_freemismatch_error)    ( ThreadId tid, MC_Chunk* mc );

void MC_(record_overlap_error)  ( ThreadId tid, const HChar* function,
                                  Addr src, Addr dst, SizeT szB );
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg );
void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag );
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag );
void MC_(record_user_error)     ( ThreadId tid, Addr a,
                                  Bool isAddrErr, UInt otag );

Bool MC_(record_leak_error)     ( ThreadId tid,
                                  UInt n_this_record,
                                  UInt n_total_records,
                                  LossRecord* lossRecord,
                                  Bool print_record,
                                  Bool count_error );

Bool MC_(record_fishy_value_error)  ( ThreadId tid, const HChar* function,
                                      const HChar *argument_name, SizeT value );

/* Leak kind tokens for use with VG_(parse_enum_set). */
extern const HChar* MC_(parse_leak_kinds_tokens);

/* Prints a description of address a. */
void MC_(pp_describe_addr) (Addr a);

/* Is this address in a user-specified "ignored range"? */
Bool MC_(in_ignored_range) ( Addr a );

/* Is this address in a user-specified ignored range of offsets below
   the current thread's stack pointer? */
Bool MC_(in_ignored_range_below_sp) ( Addr sp, Addr a, UInt szB );


/*------------------------------------------------------------*/
/*--- Client blocks                                        ---*/
/*------------------------------------------------------------*/

/* Describes a client block.  See mc_main.c.  An unused block has
   start == size == 0.  */
typedef
   struct {
      Addr        start;
      SizeT       size;
      ExeContext* where;
      HChar*      desc;
   }
   CGenBlock;

/* Get access to the client block array. */
void MC_(get_ClientBlock_array)( /*OUT*/CGenBlock** blocks,
                                 /*OUT*/UWord* nBlocks );


/*------------------------------------------------------------*/
/*--- Command line options + defaults                      ---*/
/*------------------------------------------------------------*/

/* Allow loads from partially-valid addresses?  default: YES */
extern Bool MC_(clo_partial_loads_ok);

/* Max volume of the freed blocks queue. */
extern Long MC_(clo_freelist_vol);

/* Blocks with a size >= MC_(clo_freelist_big_blocks) will be put
   in the "big block" freed blocks queue. */
extern Long MC_(clo_freelist_big_blocks);

/* Do leak check at exit?  default: NO */
extern LeakCheckMode MC_(clo_leak_check);

/* How closely should we compare ExeContexts in leak records? default: 2 */
extern VgRes MC_(clo_leak_resolution);

/* In leak check, show loss records if their R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_show_leak_kinds);

/* In leak check, a loss record is an error if its R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_errors_for_leak_kinds);

/* Various leak check heuristics which can be activated/deactivated. */
typedef
   enum {
      LchNone                =0,
      // no heuristic.
      LchStdString           =1,
      // Consider an interior pointer pointing at the array of char in a
      // std::string as reachable.
      LchLength64            =2,
      // Consider an interior pointer pointing 8 bytes (64 bits) into a block
      // as reachable, when the first 8 bytes contain the block size - 8.
      // Such length+interior pointers are used by e.g. sqlite3MemMalloc.
      // On 64-bit platforms LchNewArray will also match these blocks.
      LchNewArray            =3,
      // Consider an interior pointer pointing at the second word of a new[]
      // array as reachable. Such interior pointers are used for arrays whose
      // elements have a destructor.
      LchMultipleInheritance =4,
      // Consider an interior pointer pointing just after what looks like a
      // vtable as reachable.
  }
  LeakCheckHeuristic;

// Nr of heuristics, including the LchNone heuristic.
#define N_LEAK_CHECK_HEURISTICS 5

// Build a mask to check or set membership of Heuristic h in a set.
#define H2S(h) (1 << (h))
// Is Heuristic h a member of the set s?
#define HiS(h,s) ((s) & H2S(h))

/* Heuristics set to use for the leak search.
   Default : all heuristics. */
extern UInt MC_(clo_leak_check_heuristics);
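
/* Illustrative sketch of building and testing a heuristics set with the
   macros above (hypothetical variable name 'heur'):

      UInt heur = H2S(LchStdString) | H2S(LchNewArray);
      if (HiS(LchNewArray, MC_(clo_leak_check_heuristics))) { ... }
*/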

/* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
 * default: NO */
extern Bool MC_(clo_workaround_gcc296_bugs);

/* Fill malloc-d/free-d client blocks with a specific value?  -1 if
   not, else 0x00 .. 0xFF indicating the fill value to use.  Can be
   useful for causing programs with bad heap corruption to fail in
   more repeatable ways.  Note that malloc-filled and free-filled
   areas are still undefined and noaccess respectively.  This merely
   causes them to contain the specified values. */
extern Int MC_(clo_malloc_fill);
extern Int MC_(clo_free_fill);

/* Which stack trace(s) to keep for malloc'd/free'd client blocks?
   For each client block, the stack traces where it was allocated
   and/or freed are optionally kept depending on MC_(clo_keep_stacktraces). */
typedef
   enum {                 // keep alloc stack trace ?  keep free stack trace ?
      KS_none,            // never                     never
      KS_alloc,           // always                    never
      KS_free,            // never                     always
      KS_alloc_then_free, // when still malloc'd       when free'd
      KS_alloc_and_free,  // always                    always
   }
   KeepStacktraces;
extern KeepStacktraces MC_(clo_keep_stacktraces);

/* Indicates the level of instrumentation/checking done by Memcheck.

   1 = No undefined value checking, Addrcheck-style behaviour only:
       only address checking is done.  This is faster but finds fewer
       errors.  Note that although Addrcheck had 1 bit per byte of
       overhead vs the old Memcheck's 9 bits per byte, this mode saves
       no memory with compressed V bits -- it still costs 2 bits per
       byte.  This is a little wasteful -- it could be done with 1 bit
       per byte -- but it lets us reuse the many shadow memory access
       functions.  Note that in this mode neither the secondary V bit
       table nor the origin-tag cache are used.

   2 = Address checking and undefined value checking are performed,
       but origins are not tracked.  So the origin-tag cache is not
       used in this mode.  This setting is the default and corresponds
       to the "normal" Memcheck behaviour that has shipped for years.

   3 = Address checking, undefined value checking, and origins for
       undefined values are tracked.

   The default is 2.
*/
extern Int MC_(clo_mc_level);

/* Should we show mismatched frees?  Default: YES */
extern Bool MC_(clo_show_mismatched_frees);

/* Should we use expensive definedness checking for add/sub and compare
   operations? Default: NO */
extern Bool MC_(clo_expensive_definedness_checks);

/* Do we have a range of stack offsets to ignore?  Default: NO */
extern Bool MC_(clo_ignore_range_below_sp);
extern UInt MC_(clo_ignore_range_below_sp__first_offset);
extern UInt MC_(clo_ignore_range_below_sp__last_offset);


/*------------------------------------------------------------*/
/*--- Instrumentation                                      ---*/
/*------------------------------------------------------------*/

/* Functions defined in mc_main.c */

/* For the fail_w_o functions, the UWord arg is actually the 32-bit
   origin tag and should really be UInt, but to be simple and safe
   considering it's called from generated code, just claim it to be a
   UWord. */
VG_REGPARM(2) void MC_(helperc_value_checkN_fail_w_o) ( HWord, UWord );
VG_REGPARM(1) void MC_(helperc_value_check8_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check4_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check1_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check0_fail_w_o) ( UWord );

/* And call these ones instead to report an uninitialised value error
   but with no origin available. */
VG_REGPARM(1) void MC_(helperc_value_checkN_fail_no_o) ( HWord );
VG_REGPARM(0) void MC_(helperc_value_check8_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check4_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check1_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check0_fail_no_o) ( void );

/* V-bits load/store helpers */
VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr, ULong );
VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr, ULong );
VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV8)    ( Addr, UWord );

VG_REGPARM(2) void  MC_(helperc_LOADV256be) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV256le) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128be) ( /*OUT*/V128*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128le) ( /*OUT*/V128*, Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64be)  ( Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV8)     ( Addr );

VG_REGPARM(3)
void MC_(helperc_MAKE_STACK_UNINIT_w_o) ( Addr base, UWord len, Addr nia );

VG_REGPARM(2)
void MC_(helperc_MAKE_STACK_UNINIT_no_o) ( Addr base, UWord len );

VG_REGPARM(1)
void MC_(helperc_MAKE_STACK_UNINIT_128_no_o) ( Addr base );

/* Origin tag load/store helpers */
VG_REGPARM(2) void  MC_(helperc_b_store1) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store2) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store4) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store8) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store16)( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store32)( Addr a, UWord d32 );
VG_REGPARM(1) UWord MC_(helperc_b_load1) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load2) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load4) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load8) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load16)( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load32)( Addr a );

/* Functions defined in mc_translate.c */
IRSB* MC_(instrument) ( VgCallbackClosure* closure,
                        IRSB* bb_in,
                        const VexGuestLayout* layout,
                        const VexGuestExtents* vge,
                        const VexArchInfo* archinfo_host,
                        IRType gWordTy, IRType hWordTy );

IRSB* MC_(final_tidy) ( IRSB* );

/* Check some assertions to do with the instrumentation machinery. */
void MC_(do_instrumentation_startup_checks)( void );

#endif /* ndef __MC_INCLUDE_H */

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/