      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- Management, printing, etc, of errors and suppressions.       ---*/
      4 /*---                                                  mc_errors.c ---*/
      5 /*--------------------------------------------------------------------*/
      6 
      7 /*
      8    This file is part of MemCheck, a heavyweight Valgrind tool for
      9    detecting memory errors.
     10 
     11    Copyright (C) 2000-2013 Julian Seward
     12       jseward (at) acm.org
     13 
     14    This program is free software; you can redistribute it and/or
     15    modify it under the terms of the GNU General Public License as
     16    published by the Free Software Foundation; either version 2 of the
     17    License, or (at your option) any later version.
     18 
     19    This program is distributed in the hope that it will be useful, but
     20    WITHOUT ANY WARRANTY; without even the implied warranty of
     21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     22    General Public License for more details.
     23 
     24    You should have received a copy of the GNU General Public License
     25    along with this program; if not, write to the Free Software
     26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     27    02111-1307, USA.
     28 
     29    The GNU General Public License is contained in the file COPYING.
     30 */
     31 
     32 #include "pub_tool_basics.h"
     33 #include "pub_tool_gdbserver.h"
     34 #include "pub_tool_poolalloc.h"     // For mc_include.h
     35 #include "pub_tool_hashtable.h"     // For mc_include.h
     36 #include "pub_tool_libcbase.h"
     37 #include "pub_tool_libcassert.h"
     38 #include "pub_tool_libcprint.h"
     39 #include "pub_tool_machine.h"
     40 #include "pub_tool_mallocfree.h"
     41 #include "pub_tool_options.h"
     42 #include "pub_tool_replacemalloc.h"
     43 #include "pub_tool_tooliface.h"
     44 #include "pub_tool_threadstate.h"
     45 #include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
     46 #include "pub_tool_xarray.h"
     47 #include "pub_tool_addrinfo.h"
     48 
     49 #include "mc_include.h"
     50 
     51 
     52 /*------------------------------------------------------------*/
     53 /*--- Error types                                          ---*/
     54 /*------------------------------------------------------------*/
     55 
     56 /* See comment in mc_include.h */
     57 Bool MC_(any_value_errors) = False;
     58 
     59 
     60 /* ------------------ Errors ----------------------- */
     61 
     62 /* What kind of error it is. */
     63 typedef
     64    enum {
     65       Err_Value,
     66       Err_Cond,
     67       Err_CoreMem,
     68       Err_Addr,
     69       Err_Jump,
     70       Err_RegParam,
     71       Err_MemParam,
     72       Err_User,
     73       Err_Free,
     74       Err_FreeMismatch,
     75       Err_Overlap,
     76       Err_Leak,
     77       Err_IllegalMempool,
     78    }
     79    MC_ErrorTag;
     80 
     81 
     82 typedef struct _MC_Error MC_Error;
     83 
     84 struct _MC_Error {
     85    // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
     86    //MC_ErrorTag tag;
     87 
     88    union {
     89       // Use of an undefined value:
     90       // - as a pointer in a load or store
     91       // - as a jump target
     92       struct {
     93          SizeT szB;   // size of value in bytes
     94          // Origin info
     95          UInt        otag;      // origin tag
     96          ExeContext* origin_ec; // filled in later
     97       } Value;
     98 
     99       // Use of an undefined value in a conditional branch or move.
    100       struct {
    101          // Origin info
    102          UInt        otag;      // origin tag
    103          ExeContext* origin_ec; // filled in later
    104       } Cond;
    105 
    106       // Addressability error in core (signal-handling) operation.
     107       // It would be good to get rid of this error kind by merging it with
    108       // another one somehow.
    109       struct {
    110       } CoreMem;
    111 
    112       // Use of an unaddressable memory location in a load or store.
    113       struct {
    114          Bool     isWrite;    // read or write?
    115          SizeT    szB;        // not used for exec (jump) errors
    116          Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
    117          AddrInfo ai;
    118       } Addr;
    119 
    120       // Jump to an unaddressable memory location.
    121       struct {
    122          AddrInfo ai;
    123       } Jump;
    124 
    125       // System call register input contains undefined bytes.
    126       struct {
    127          // Origin info
    128          UInt        otag;      // origin tag
    129          ExeContext* origin_ec; // filled in later
    130       } RegParam;
    131 
    132       // System call memory input contains undefined/unaddressable bytes
    133       struct {
    134          Bool     isAddrErr;  // Addressability or definedness error?
    135          AddrInfo ai;
    136          // Origin info
    137          UInt        otag;      // origin tag
    138          ExeContext* origin_ec; // filled in later
    139       } MemParam;
    140 
    141       // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
    142       struct {
    143          Bool     isAddrErr;  // Addressability or definedness error?
    144          AddrInfo ai;
    145          // Origin info
    146          UInt        otag;      // origin tag
    147          ExeContext* origin_ec; // filled in later
    148       } User;
    149 
    150       // Program tried to free() something that's not a heap block (this
     151       // covers double-frees).
    152       struct {
    153          AddrInfo ai;
    154       } Free;
    155 
    156       // Program allocates heap block with one function
     157       // (malloc/new/new[]/custom) and deallocates it with a non-matching one.
    158       struct {
    159          AddrInfo ai;
    160       } FreeMismatch;
    161 
    162       // Call to strcpy, memcpy, etc, with overlapping blocks.
    163       struct {
    164          Addr  src;   // Source block
    165          Addr  dst;   // Destination block
    166          SizeT szB;   // Size in bytes;  0 if unused.
    167       } Overlap;
    168 
    169       // A memory leak.
    170       struct {
    171          UInt        n_this_record;
    172          UInt        n_total_records;
    173          LossRecord* lr;
    174       } Leak;
    175 
    176       // A memory pool error.
    177       struct {
    178          AddrInfo ai;
    179       } IllegalMempool;
    180 
    181    } Err;
    182 };
    183 
    184 
    185 /*------------------------------------------------------------*/
    186 /*--- Printing errors                                      ---*/
    187 /*------------------------------------------------------------*/
    188 
    189 /* This is the "this error is due to be printed shortly; so have a
     190    look at it and print any preamble you want" function.  Which, in
    191    Memcheck, we don't use.  Hence a no-op.
    192 */
    193 void MC_(before_pp_Error) ( Error* err ) {
    194 }
    195 
    196 /* Do a printf-style operation on either the XML or normal output
    197    channel, depending on the setting of VG_(clo_xml).
    198 */
    199 static void emit_WRK ( const HChar* format, va_list vargs )
    200 {
    201    if (VG_(clo_xml)) {
    202       VG_(vprintf_xml)(format, vargs);
    203    } else {
    204       VG_(vmessage)(Vg_UserMsg, format, vargs);
    205    }
    206 }
    207 static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
    208 static void emit ( const HChar* format, ... )
    209 {
    210    va_list vargs;
    211    va_start(vargs, format);
    212    emit_WRK(format, vargs);
    213    va_end(vargs);
    214 }
    215 
    216 
    217 static const HChar* str_leak_lossmode ( Reachedness lossmode )
    218 {
    219    const HChar *loss = "?";
    220    switch (lossmode) {
    221       case Unreached:    loss = "definitely lost"; break;
    222       case IndirectLeak: loss = "indirectly lost"; break;
    223       case Possible:     loss = "possibly lost"; break;
    224       case Reachable:    loss = "still reachable"; break;
    225    }
    226    return loss;
    227 }
    228 
    229 static const HChar* xml_leak_kind ( Reachedness lossmode )
    230 {
    231    const HChar *loss = "?";
    232    switch (lossmode) {
    233       case Unreached:    loss = "Leak_DefinitelyLost"; break;
    234       case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
    235       case Possible:     loss = "Leak_PossiblyLost"; break;
    236       case Reachable:    loss = "Leak_StillReachable"; break;
    237    }
    238    return loss;
    239 }
    240 
    241 Bool MC_(parse_leak_kinds) ( const HChar* str0, UInt* lks )
    242 {
    243    return VG_(parse_enum_set)("reachable,possible,indirect,definite",
    244                               str0, lks);
    245 }
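
         /* Illustrative usage (not from the original source): an option
            handler such as the one for --show-leak-kinds might do

               UInt lks = 0;
               if (!MC_(parse_leak_kinds)("definite,indirect", &lks))
                  ...reject the option value...

            On success the bits for the named kinds are set in lks; a name
            outside "reachable,possible,indirect,definite" makes
            VG_(parse_enum_set) fail, so the whole call returns False. */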
    246 
    247 static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
    248 {
    249    switch(r) {
    250    case Reachable:    return "reachable";
    251    case Possible:     return "possible";
    252    case IndirectLeak: return "indirect";
    253    case Unreached:    return "definite";
    254    default:           tl_assert(0);
    255    }
    256 }
    257 
    258 static void mc_pp_origin ( ExeContext* ec, UInt okind )
    259 {
    260    const HChar* src = NULL;
    261    tl_assert(ec);
    262 
    263    switch (okind) {
    264       case MC_OKIND_STACK:   src = " by a stack allocation"; break;
    265       case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
    266       case MC_OKIND_USER:    src = " by a client request"; break;
    267       case MC_OKIND_UNKNOWN: src = ""; break;
    268    }
    269    tl_assert(src); /* guards against invalid 'okind' */
    270 
    271    if (VG_(clo_xml)) {
    272       emit( "  <auxwhat>Uninitialised value was created%s</auxwhat>\n",
    273             src);
    274       VG_(pp_ExeContext)( ec );
    275    } else {
    276       emit( " Uninitialised value was created%s\n", src);
    277       VG_(pp_ExeContext)( ec );
    278    }
    279 }
    280 
    281 HChar * MC_(snprintf_delta) (HChar * buf, Int size,
    282                              SizeT current_val, SizeT old_val,
    283                              LeakCheckDeltaMode delta_mode)
    284 {
    285    if (delta_mode == LCD_Any)
    286       buf[0] = '\0';
    287    else if (current_val >= old_val)
    288       VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
    289    else
    290       VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);
    291 
    292    return buf;
    293 }
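
         /* Example (illustrative numbers): in an incremental leak check,
            i.e. delta_mode != LCD_Any, current_val == 1300 and
            old_val == 1000 give " (+300)", the reverse gives " (-300)",
            and with LCD_Any the buffer is simply set to "". */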
    294 
    295 static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
    296                           LossRecord* lr, Bool xml)
    297 {
    298    // char arrays to produce the indication of increase/decrease in case
    299    // of delta_mode != LCD_Any
    300    HChar d_bytes[20];
    301    HChar d_direct_bytes[20];
    302    HChar d_indirect_bytes[20];
    303    HChar d_num_blocks[20];
    304 
    305    MC_(snprintf_delta) (d_bytes, 20,
    306                         lr->szB + lr->indirect_szB,
    307                         lr->old_szB + lr->old_indirect_szB,
    308                         MC_(detect_memory_leaks_last_delta_mode));
    309    MC_(snprintf_delta) (d_direct_bytes, 20,
    310                         lr->szB,
    311                         lr->old_szB,
    312                         MC_(detect_memory_leaks_last_delta_mode));
    313    MC_(snprintf_delta) (d_indirect_bytes, 20,
    314                         lr->indirect_szB,
    315                         lr->old_indirect_szB,
    316                         MC_(detect_memory_leaks_last_delta_mode));
    317    MC_(snprintf_delta) (d_num_blocks, 20,
    318                         (SizeT) lr->num_blocks,
    319                         (SizeT) lr->old_num_blocks,
    320                         MC_(detect_memory_leaks_last_delta_mode));
    321 
    322    if (xml) {
    323       emit("  <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
    324       if (lr->indirect_szB > 0) {
    325          emit( "  <xwhat>\n" );
    326          emit( "    <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
    327                "in %'u%s blocks"
    328                " are %s in loss record %'u of %'u</text>\n",
    329                lr->szB + lr->indirect_szB, d_bytes,
    330                lr->szB, d_direct_bytes,
    331                lr->indirect_szB, d_indirect_bytes,
    332                lr->num_blocks, d_num_blocks,
    333                str_leak_lossmode(lr->key.state),
    334                n_this_record, n_total_records );
    335          // Nb: don't put commas in these XML numbers
    336          emit( "    <leakedbytes>%lu</leakedbytes>\n",
    337                lr->szB + lr->indirect_szB );
    338          emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
    339          emit( "  </xwhat>\n" );
    340       } else {
    341          emit( "  <xwhat>\n" );
    342          emit( "    <text>%'lu%s bytes in %'u%s blocks"
    343                " are %s in loss record %'u of %'u</text>\n",
    344                lr->szB, d_direct_bytes,
    345                lr->num_blocks, d_num_blocks,
    346                str_leak_lossmode(lr->key.state),
    347                n_this_record, n_total_records );
     348          emit( "    <leakedbytes>%lu</leakedbytes>\n", lr->szB);
     349          emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
    350          emit( "  </xwhat>\n" );
    351       }
    352       VG_(pp_ExeContext)(lr->key.allocated_at);
    353    } else { /* ! if (xml) */
    354       if (lr->indirect_szB > 0) {
    355          emit(
    356             "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
    357             " are %s in loss record %'u of %'u\n",
    358             lr->szB + lr->indirect_szB, d_bytes,
    359             lr->szB, d_direct_bytes,
    360             lr->indirect_szB, d_indirect_bytes,
    361             lr->num_blocks, d_num_blocks,
    362             str_leak_lossmode(lr->key.state),
    363             n_this_record, n_total_records
    364          );
    365       } else {
    366          emit(
    367             "%'lu%s bytes in %'u%s blocks are %s in loss record %'u of %'u\n",
    368             lr->szB, d_direct_bytes,
    369             lr->num_blocks, d_num_blocks,
    370             str_leak_lossmode(lr->key.state),
    371             n_this_record, n_total_records
    372          );
    373       }
    374       VG_(pp_ExeContext)(lr->key.allocated_at);
    375    } /* if (xml) */
    376 }
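
         /* For reference, an illustrative (made-up numbers) non-XML line
            produced above for a record with indirectly lost bytes is:

               1,600 (1,200 direct, 400 indirect) bytes in 3 blocks are
               definitely lost in loss record 5 of 12

            followed by the allocation stack from VG_(pp_ExeContext).  During
            an incremental leak search, the "(+N)"/"(-N)" deltas produced by
            MC_(snprintf_delta) are appended to each of the counts. */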
    377 
    378 void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
    379                         LossRecord* l)
    380 {
    381    pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
    382 }
    383 
    384 void MC_(pp_Error) ( Error* err )
    385 {
    386    const Bool xml  = VG_(clo_xml); /* a shorthand */
    387    MC_Error* extra = VG_(get_error_extra)(err);
    388 
    389    switch (VG_(get_error_kind)(err)) {
    390       case Err_CoreMem:
    391          /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
    392          /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
    393             signal handler frame.  --njn */
    394          // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
    395          // the following code is untested.  Bad.
    396          if (xml) {
    397             emit( "  <kind>CoreMemError</kind>\n" );
    398             emit( "  <what>%pS contains unaddressable byte(s)</what>\n",
    399                   VG_(get_error_string)(err));
    400             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    401          } else {
    402             emit( "%s contains unaddressable byte(s)\n",
    403                   VG_(get_error_string)(err));
    404             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    405          }
    406          break;
    407 
    408       case Err_Value:
    409          MC_(any_value_errors) = True;
    410          if (xml) {
    411             emit( "  <kind>UninitValue</kind>\n" );
    412             emit( "  <what>Use of uninitialised value of size %ld</what>\n",
    413                   extra->Err.Value.szB );
    414             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    415             if (extra->Err.Value.origin_ec)
    416                mc_pp_origin( extra->Err.Value.origin_ec,
    417                             extra->Err.Value.otag & 3 );
    418          } else {
     419             /* Could also show extra->Err.Value.otag if debugging origin
    420                tracking */
    421             emit( "Use of uninitialised value of size %ld\n",
    422                   extra->Err.Value.szB );
    423             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    424             if (extra->Err.Value.origin_ec)
    425                mc_pp_origin( extra->Err.Value.origin_ec,
    426                             extra->Err.Value.otag & 3 );
    427          }
    428          break;
    429 
    430       case Err_Cond:
    431          MC_(any_value_errors) = True;
    432          if (xml) {
    433             emit( "  <kind>UninitCondition</kind>\n" );
    434             emit( "  <what>Conditional jump or move depends"
    435                   " on uninitialised value(s)</what>\n" );
    436             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    437             if (extra->Err.Cond.origin_ec)
    438                mc_pp_origin( extra->Err.Cond.origin_ec,
    439                              extra->Err.Cond.otag & 3 );
    440          } else {
    441             /* Could also show extra->Err.Cond.otag if debugging origin
    442                tracking */
    443             emit( "Conditional jump or move depends"
    444                   " on uninitialised value(s)\n" );
    445             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    446             if (extra->Err.Cond.origin_ec)
    447                mc_pp_origin( extra->Err.Cond.origin_ec,
    448                              extra->Err.Cond.otag & 3 );
    449          }
    450          break;
    451 
    452       case Err_RegParam:
    453          MC_(any_value_errors) = True;
    454          if (xml) {
    455             emit( "  <kind>SyscallParam</kind>\n" );
    456             emit( "  <what>Syscall param %pS contains "
    457                   "uninitialised byte(s)</what>\n",
    458                   VG_(get_error_string)(err) );
    459             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    460             if (extra->Err.RegParam.origin_ec)
    461                mc_pp_origin( extra->Err.RegParam.origin_ec,
    462                              extra->Err.RegParam.otag & 3 );
    463          } else {
    464             emit( "Syscall param %s contains uninitialised byte(s)\n",
    465                   VG_(get_error_string)(err) );
    466             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    467             if (extra->Err.RegParam.origin_ec)
    468                mc_pp_origin( extra->Err.RegParam.origin_ec,
    469                              extra->Err.RegParam.otag & 3 );
    470          }
    471          break;
    472 
    473       case Err_MemParam:
    474          if (!extra->Err.MemParam.isAddrErr)
    475             MC_(any_value_errors) = True;
    476          if (xml) {
    477             emit( "  <kind>SyscallParam</kind>\n" );
    478             emit( "  <what>Syscall param %pS points to %s byte(s)</what>\n",
    479                   VG_(get_error_string)(err),
    480                   extra->Err.MemParam.isAddrErr
    481                      ? "unaddressable" : "uninitialised" );
    482             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    483             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
    484                                 &extra->Err.MemParam.ai, False);
    485             if (extra->Err.MemParam.origin_ec
    486                 && !extra->Err.MemParam.isAddrErr)
    487                mc_pp_origin( extra->Err.MemParam.origin_ec,
    488                              extra->Err.MemParam.otag & 3 );
    489          } else {
    490             emit( "Syscall param %s points to %s byte(s)\n",
    491                   VG_(get_error_string)(err),
    492                   extra->Err.MemParam.isAddrErr
    493                      ? "unaddressable" : "uninitialised" );
    494             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    495             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
    496                                 &extra->Err.MemParam.ai, False);
    497             if (extra->Err.MemParam.origin_ec
    498                 && !extra->Err.MemParam.isAddrErr)
    499                mc_pp_origin( extra->Err.MemParam.origin_ec,
    500                              extra->Err.MemParam.otag & 3 );
    501          }
    502          break;
    503 
    504       case Err_User:
    505          if (!extra->Err.User.isAddrErr)
    506             MC_(any_value_errors) = True;
    507          if (xml) {
    508             emit( "  <kind>ClientCheck</kind>\n" );
    509             emit( "  <what>%s byte(s) found "
    510                   "during client check request</what>\n",
    511                    extra->Err.User.isAddrErr
    512                       ? "Unaddressable" : "Uninitialised" );
    513             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    514             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
    515                                 False);
    516             if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
    517                mc_pp_origin( extra->Err.User.origin_ec,
    518                              extra->Err.User.otag & 3 );
    519          } else {
    520             emit( "%s byte(s) found during client check request\n",
    521                    extra->Err.User.isAddrErr
    522                       ? "Unaddressable" : "Uninitialised" );
    523             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    524             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
    525                                 False);
    526             if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
    527                mc_pp_origin( extra->Err.User.origin_ec,
    528                              extra->Err.User.otag & 3 );
    529          }
    530          break;
    531 
    532       case Err_Free:
    533          if (xml) {
    534             emit( "  <kind>InvalidFree</kind>\n" );
    535             emit( "  <what>Invalid free() / delete / delete[]"
    536                   " / realloc()</what>\n" );
    537             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    538             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    539                                  &extra->Err.Free.ai, False );
    540          } else {
    541             emit( "Invalid free() / delete / delete[] / realloc()\n" );
    542             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    543             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    544                                  &extra->Err.Free.ai, False );
    545          }
    546          break;
    547 
    548       case Err_FreeMismatch:
    549          if (xml) {
    550             emit( "  <kind>MismatchedFree</kind>\n" );
    551             emit( "  <what>Mismatched free() / delete / delete []</what>\n" );
    552             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    553             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
    554                                 &extra->Err.FreeMismatch.ai, False);
    555          } else {
    556             emit( "Mismatched free() / delete / delete []\n" );
    557             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    558             VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
    559                                 &extra->Err.FreeMismatch.ai, False);
    560          }
    561          break;
    562 
    563       case Err_Addr:
    564          if (xml) {
    565             emit( "  <kind>Invalid%s</kind>\n",
    566                   extra->Err.Addr.isWrite ? "Write" : "Read"  );
    567             emit( "  <what>Invalid %s of size %ld</what>\n",
    568                   extra->Err.Addr.isWrite ? "write" : "read",
    569                   extra->Err.Addr.szB );
    570             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    571             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    572                                  &extra->Err.Addr.ai,
    573                                  extra->Err.Addr.maybe_gcc );
    574          } else {
    575             emit( "Invalid %s of size %ld\n",
    576                   extra->Err.Addr.isWrite ? "write" : "read",
    577                   extra->Err.Addr.szB );
    578             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    579 
    580             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    581                                  &extra->Err.Addr.ai,
    582                                  extra->Err.Addr.maybe_gcc );
    583          }
    584          break;
    585 
    586       case Err_Jump:
    587          if (xml) {
    588             emit( "  <kind>InvalidJump</kind>\n" );
    589             emit( "  <what>Jump to the invalid address stated "
    590                   "on the next line</what>\n" );
    591             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    592             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
    593                                  False );
    594          } else {
    595             emit( "Jump to the invalid address stated on the next line\n" );
    596             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    597             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
    598                                  False );
    599          }
    600          break;
    601 
    602       case Err_Overlap:
    603          if (xml) {
    604             emit( "  <kind>Overlap</kind>\n" );
    605             if (extra->Err.Overlap.szB == 0) {
    606                emit( "  <what>Source and destination overlap "
    607                      "in %pS(%#lx, %#lx)\n</what>\n",
    608                      VG_(get_error_string)(err),
    609                      extra->Err.Overlap.dst, extra->Err.Overlap.src );
    610             } else {
    611                emit( "  <what>Source and destination overlap "
    612                      "in %s(%#lx, %#lx, %lu)</what>\n",
    613                      VG_(get_error_string)(err),
    614                      extra->Err.Overlap.dst, extra->Err.Overlap.src,
    615                      extra->Err.Overlap.szB );
    616             }
    617             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    618          } else {
    619             if (extra->Err.Overlap.szB == 0) {
    620                emit( "Source and destination overlap in %pS(%#lx, %#lx)\n",
    621                      VG_(get_error_string)(err),
    622                      extra->Err.Overlap.dst, extra->Err.Overlap.src );
    623             } else {
    624                emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
    625                      VG_(get_error_string)(err),
    626                      extra->Err.Overlap.dst, extra->Err.Overlap.src,
    627                      extra->Err.Overlap.szB );
    628             }
    629             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    630          }
    631          break;
    632 
    633       case Err_IllegalMempool:
    634          // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
    635          // the following code is untested.  Bad.
    636          if (xml) {
    637             emit( "  <kind>InvalidMemPool</kind>\n" );
    638             emit( "  <what>Illegal memory pool address</what>\n" );
    639             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    640             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    641                                  &extra->Err.IllegalMempool.ai, False );
    642          } else {
    643             emit( "Illegal memory pool address\n" );
    644             VG_(pp_ExeContext)( VG_(get_error_where)(err) );
    645             VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
    646                                  &extra->Err.IllegalMempool.ai, False );
    647          }
    648          break;
    649 
    650       case Err_Leak: {
    651          UInt        n_this_record   = extra->Err.Leak.n_this_record;
    652          UInt        n_total_records = extra->Err.Leak.n_total_records;
    653          LossRecord* lr              = extra->Err.Leak.lr;
    654          pp_LossRecord (n_this_record, n_total_records, lr, xml);
    655          break;
    656       }
    657 
    658       default:
    659          VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
    660                      VG_(get_error_kind)(err));
     661          VG_(tool_panic)("unknown error code in mc_pp_Error");
    662    }
    663 }
    664 
    665 /*------------------------------------------------------------*/
    666 /*--- Recording errors                                     ---*/
    667 /*------------------------------------------------------------*/
    668 
     669 /* This many bytes below %ESP are considered addressable if we're
    670    doing the --workaround-gcc296-bugs hack. */
    671 #define VG_GCC296_BUG_STACK_SLOP 1024
    672 
    673 /* Is this address within some small distance below %ESP?  Used only
    674    for the --workaround-gcc296-bugs kludge. */
    675 static Bool is_just_below_ESP( Addr esp, Addr aa )
    676 {
    677    esp -= VG_STACK_REDZONE_SZB;
    678    if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
    679       return True;
    680    else
    681       return False;
    682 }
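
         /* Illustrative: with the default slop of 1024 bytes, an access at
            SP - VG_STACK_REDZONE_SZB - 64 counts as "just below ESP", so with
            --workaround-gcc296-bugs=yes MC_(record_address_error) below
            ignores it; an access several thousand bytes below SP falls
            outside the slop and is still reported. */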
    683 
    684 /* --- Called from generated and non-generated code --- */
    685 
    686 void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
    687                                  Bool isWrite )
    688 {
    689    MC_Error extra;
    690    Bool     just_below_esp;
    691 
    692    if (MC_(in_ignored_range)(a))
    693       return;
    694 
    695    if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
    696       return;
    697 
    698    just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
    699 
    700    /* If this is caused by an access immediately below %ESP, and the
    701       user asks nicely, we just ignore it. */
    702    if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
    703       return;
    704 
    705    extra.Err.Addr.isWrite   = isWrite;
    706    extra.Err.Addr.szB       = szB;
    707    extra.Err.Addr.maybe_gcc = just_below_esp;
    708    extra.Err.Addr.ai.tag    = Addr_Undescribed;
    709    VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
    710 }
    711 
    712 void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
    713 {
    714    MC_Error extra;
    715    tl_assert( MC_(clo_mc_level) >= 2 );
    716    if (otag > 0)
    717       tl_assert( MC_(clo_mc_level) == 3 );
    718    extra.Err.Value.szB       = szB;
    719    extra.Err.Value.otag      = otag;
    720    extra.Err.Value.origin_ec = NULL;  /* Filled in later */
    721    VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
    722 }
    723 
    724 void MC_(record_cond_error) ( ThreadId tid, UInt otag )
    725 {
    726    MC_Error extra;
    727    tl_assert( MC_(clo_mc_level) >= 2 );
    728    if (otag > 0)
    729       tl_assert( MC_(clo_mc_level) == 3 );
    730    extra.Err.Cond.otag      = otag;
    731    extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
    732    VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
    733 }
    734 
    735 /* --- Called from non-generated code --- */
    736 
    737 /* This is for memory errors in signal-related memory. */
    738 void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
    739 {
    740    VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
    741 }
    742 
    743 void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
    744 {
    745    MC_Error extra;
    746    tl_assert(VG_INVALID_THREADID != tid);
    747    if (otag > 0)
    748       tl_assert( MC_(clo_mc_level) == 3 );
    749    extra.Err.RegParam.otag      = otag;
    750    extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
    751    VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
    752 }
    753 
    754 void MC_(record_memparam_error) ( ThreadId tid, Addr a,
    755                                   Bool isAddrErr, const HChar* msg, UInt otag )
    756 {
    757    MC_Error extra;
    758    tl_assert(VG_INVALID_THREADID != tid);
    759    if (!isAddrErr)
    760       tl_assert( MC_(clo_mc_level) >= 2 );
    761    if (otag != 0) {
    762       tl_assert( MC_(clo_mc_level) == 3 );
    763       tl_assert( !isAddrErr );
    764    }
    765    extra.Err.MemParam.isAddrErr = isAddrErr;
    766    extra.Err.MemParam.ai.tag    = Addr_Undescribed;
    767    extra.Err.MemParam.otag      = otag;
    768    extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
    769    VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
    770 }
    771 
    772 void MC_(record_jump_error) ( ThreadId tid, Addr a )
    773 {
    774    MC_Error extra;
    775    tl_assert(VG_INVALID_THREADID != tid);
    776    extra.Err.Jump.ai.tag = Addr_Undescribed;
    777    VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
    778 }
    779 
    780 void MC_(record_free_error) ( ThreadId tid, Addr a )
    781 {
    782    MC_Error extra;
    783    tl_assert(VG_INVALID_THREADID != tid);
    784    extra.Err.Free.ai.tag = Addr_Undescribed;
    785    VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
    786 }
    787 
    788 void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
    789 {
    790    MC_Error extra;
    791    AddrInfo* ai = &extra.Err.FreeMismatch.ai;
    792    tl_assert(VG_INVALID_THREADID != tid);
    793    ai->tag = Addr_Block;
    794    ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
    795    ai->Addr.Block.block_desc = "block";
    796    ai->Addr.Block.block_szB  = mc->szB;
    797    ai->Addr.Block.rwoffset   = 0;
    798    ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
    799    ai->Addr.Block.freed_at = MC_(freed_at) (mc);
    800    VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
    801                             &extra );
    802 }
    803 
    804 void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
    805 {
    806    MC_Error extra;
    807    tl_assert(VG_INVALID_THREADID != tid);
    808    extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
    809    VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
    810 }
    811 
    812 void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
    813                                  Addr src, Addr dst, SizeT szB )
    814 {
    815    MC_Error extra;
    816    tl_assert(VG_INVALID_THREADID != tid);
    817    extra.Err.Overlap.src = src;
    818    extra.Err.Overlap.dst = dst;
    819    extra.Err.Overlap.szB = szB;
    820    VG_(maybe_record_error)(
    821       tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
    822 }
    823 
    824 Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
    825                               UInt n_total_records, LossRecord* lr,
    826                               Bool print_record, Bool count_error )
    827 {
    828    MC_Error extra;
    829    extra.Err.Leak.n_this_record   = n_this_record;
    830    extra.Err.Leak.n_total_records = n_total_records;
    831    extra.Err.Leak.lr              = lr;
    832    return
    833    VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
    834                        lr->key.allocated_at, print_record,
    835                        /*allow_GDB_attach*/False, count_error );
    836 }
    837 
    838 void MC_(record_user_error) ( ThreadId tid, Addr a,
    839                               Bool isAddrErr, UInt otag )
    840 {
    841    MC_Error extra;
    842    if (otag != 0) {
    843       tl_assert(!isAddrErr);
    844       tl_assert( MC_(clo_mc_level) == 3 );
    845    }
    846    if (!isAddrErr) {
    847       tl_assert( MC_(clo_mc_level) >= 2 );
    848    }
    849    tl_assert(VG_INVALID_THREADID != tid);
    850    extra.Err.User.isAddrErr = isAddrErr;
    851    extra.Err.User.ai.tag    = Addr_Undescribed;
    852    extra.Err.User.otag      = otag;
    853    extra.Err.User.origin_ec = NULL;  /* Filled in later */
    854    VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
    855 }
    856 
    857 /*------------------------------------------------------------*/
    858 /*--- Other error operations                               ---*/
    859 /*------------------------------------------------------------*/
    860 
    861 /* Compare error contexts, to detect duplicates.  Note that if they
    862    are otherwise the same, the faulting addrs and associated rwoffsets
    863    are allowed to be different.  */
    864 Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
    865 {
    866    MC_Error* extra1 = VG_(get_error_extra)(e1);
    867    MC_Error* extra2 = VG_(get_error_extra)(e2);
    868 
    869    /* Guaranteed by calling function */
    870    tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
    871 
    872    switch (VG_(get_error_kind)(e1)) {
    873       case Err_CoreMem: {
    874          const HChar *e1s, *e2s;
    875          e1s = VG_(get_error_string)(e1);
    876          e2s = VG_(get_error_string)(e2);
    877          if (e1s == e2s)                   return True;
    878          if (VG_STREQ(e1s, e2s))           return True;
    879          return False;
    880       }
    881 
    882       case Err_RegParam:
    883          return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
    884 
    885       // Perhaps we should also check the addrinfo.akinds for equality.
    886       // That would result in more error reports, but only in cases where
    887       // a register contains uninitialised bytes and points to memory
    888       // containing uninitialised bytes.  Currently, the 2nd of those to be
    889       // detected won't be reported.  That is (nearly?) always the memory
    890       // error, which is good.
    891       case Err_MemParam:
    892          if (!VG_STREQ(VG_(get_error_string)(e1),
    893                        VG_(get_error_string)(e2))) return False;
    894          // fall through
    895       case Err_User:
    896          return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
    897                 ? True : False );
    898 
    899       case Err_Free:
    900       case Err_FreeMismatch:
    901       case Err_Jump:
    902       case Err_IllegalMempool:
    903       case Err_Overlap:
    904       case Err_Cond:
    905          return True;
    906 
    907       case Err_Addr:
    908          return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
    909                 ? True : False );
    910 
    911       case Err_Value:
    912          return ( extra1->Err.Value.szB == extra2->Err.Value.szB
    913                 ? True : False );
    914 
    915       case Err_Leak:
    916          VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
    917                          "since it's handled with VG_(unique_error)()!");
    918 
    919       default:
    920          VG_(printf)("Error:\n  unknown error code %d\n",
    921                      VG_(get_error_kind)(e1));
    922          VG_(tool_panic)("unknown error code in mc_eq_Error");
    923    }
    924 }
    925 
    926 /* Functions used when searching MC_Chunk lists */
    927 static
    928 Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
    929 {
    930    return VG_(addr_is_in_block)( a, mc->data, mc->szB,
    931                                  MC_(Malloc_Redzone_SzB) );
    932 }
    933 static
    934 Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
    935 {
    936    return VG_(addr_is_in_block)( a, mc->data, mc->szB,
    937                                  rzB );
    938 }
    939 
    940 // Forward declarations
    941 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
    942 static Bool mempool_block_maybe_describe( Addr a, AddrInfo* ai );
    943 
    944 
    945 /* Describe an address as best you can, for error messages,
    946    putting the result in ai. */
    947 static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
    948 {
    949    MC_Chunk*  mc;
    950 
    951    tl_assert(Addr_Undescribed == ai->tag);
    952 
    953    /* -- Perhaps it's a user-named block? -- */
    954    if (client_block_maybe_describe( a, ai )) {
    955       return;
    956    }
     957    /* -- Perhaps it's in a mempool block? -- */
    958    if (mempool_block_maybe_describe( a, ai )) {
    959       return;
    960    }
    961    /* Blocks allocated by memcheck malloc functions are either
    962       on the recently freed list or on the malloc-ed list.
     963       Custom blocks can be on both: a recently freed block might
     964       have just been re-allocated.
     965       So, search the malloc-ed list first, as the most recent
     966       block is the probable cause of the error.
     967       We do, however, detect and report when such a block is a
     968       recently re-allocated one. */
    969    /* -- Search for a currently malloc'd block which might bracket it. -- */
    970    VG_(HT_ResetIter)(MC_(malloc_list));
    971    while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
    972       if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
    973          ai->tag = Addr_Block;
    974          ai->Addr.Block.block_kind = Block_Mallocd;
    975          if (MC_(get_freed_block_bracketting)( a ))
    976             ai->Addr.Block.block_desc = "recently re-allocated block";
    977          else
    978             ai->Addr.Block.block_desc = "block";
    979          ai->Addr.Block.block_szB  = mc->szB;
    980          ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
    981          ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
    982          ai->Addr.Block.freed_at = MC_(freed_at)(mc);
    983          return;
    984       }
    985    }
    986    /* -- Search for a recently freed block which might bracket it. -- */
    987    mc = MC_(get_freed_block_bracketting)( a );
    988    if (mc) {
    989       ai->tag = Addr_Block;
    990       ai->Addr.Block.block_kind = Block_Freed;
    991       ai->Addr.Block.block_desc = "block";
    992       ai->Addr.Block.block_szB  = mc->szB;
    993       ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
    994       ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
    995       ai->Addr.Block.freed_at = MC_(freed_at)(mc);
    996       return;
    997    }
    998 
     999    /* No block found.  Search for a non-heap block description. */
   1000    VG_(describe_addr) (a, ai);
   1001 }
   1002 
   1003 void MC_(pp_describe_addr) ( Addr a )
   1004 {
   1005    AddrInfo ai;
   1006 
   1007    ai.tag = Addr_Undescribed;
   1008    describe_addr (a, &ai);
   1009    VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
   1010 }
   1011 
   1012 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
   1013    does not refer to a known origin. */
   1014 static void update_origin ( /*OUT*/ExeContext** origin_ec,
   1015                             UInt otag )
   1016 {
   1017    UInt ecu = otag & ~3;
   1018    *origin_ec = NULL;
   1019    if (VG_(is_plausible_ECU)(ecu)) {
   1020       *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   1021    }
   1022 }
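
         /* Note on the otag encoding as used in this file: the low two bits
            carry the origin kind (an MC_OKIND_* value, hence the "otag & 3"
            at the mc_pp_origin call sites) and the remaining bits are the
            ExeContext unique (ECU) extracted above with "otag & ~3".  The
            record_* functions below accept otag == 0 when no origin is being
            tracked, in which case no origin is printed. */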
   1023 
   1024 /* Updates the copy with address info if necessary (but not for all errors). */
   1025 UInt MC_(update_Error_extra)( Error* err )
   1026 {
   1027    MC_Error* extra = VG_(get_error_extra)(err);
   1028 
   1029    switch (VG_(get_error_kind)(err)) {
   1030    // These ones don't have addresses associated with them, and so don't
   1031    // need any updating.
   1032    case Err_CoreMem:
   1033    //case Err_Value:
   1034    //case Err_Cond:
   1035    case Err_Overlap:
   1036    // For Err_Leaks the returned size does not matter -- they are always
    1037    // shown with VG_(unique_error)() so their 'extra' is not copied.  But
   1038    // we make it consistent with the others.
   1039    case Err_Leak:
   1040       return sizeof(MC_Error);
   1041 
   1042    // For value errors, get the ExeContext corresponding to the
   1043    // origin tag.  Note that it is a kludge to assume that
   1044    // a length-1 trace indicates a stack origin.  FIXME.
   1045    case Err_Value:
   1046       update_origin( &extra->Err.Value.origin_ec,
   1047                      extra->Err.Value.otag );
   1048       return sizeof(MC_Error);
   1049    case Err_Cond:
   1050       update_origin( &extra->Err.Cond.origin_ec,
   1051                      extra->Err.Cond.otag );
   1052       return sizeof(MC_Error);
   1053    case Err_RegParam:
   1054       update_origin( &extra->Err.RegParam.origin_ec,
   1055                      extra->Err.RegParam.otag );
   1056       return sizeof(MC_Error);
   1057 
   1058    // These ones always involve a memory address.
   1059    case Err_Addr:
   1060       describe_addr ( VG_(get_error_address)(err),
   1061                       &extra->Err.Addr.ai );
   1062       return sizeof(MC_Error);
   1063    case Err_MemParam:
   1064       describe_addr ( VG_(get_error_address)(err),
   1065                       &extra->Err.MemParam.ai );
   1066       update_origin( &extra->Err.MemParam.origin_ec,
   1067                      extra->Err.MemParam.otag );
   1068       return sizeof(MC_Error);
   1069    case Err_Jump:
   1070       describe_addr ( VG_(get_error_address)(err),
   1071                       &extra->Err.Jump.ai );
   1072       return sizeof(MC_Error);
   1073    case Err_User:
   1074       describe_addr ( VG_(get_error_address)(err),
   1075                       &extra->Err.User.ai );
   1076       update_origin( &extra->Err.User.origin_ec,
   1077                      extra->Err.User.otag );
   1078       return sizeof(MC_Error);
   1079    case Err_Free:
   1080       describe_addr ( VG_(get_error_address)(err),
   1081                       &extra->Err.Free.ai );
   1082       return sizeof(MC_Error);
   1083    case Err_IllegalMempool:
   1084       describe_addr ( VG_(get_error_address)(err),
   1085                       &extra->Err.IllegalMempool.ai );
   1086       return sizeof(MC_Error);
   1087 
   1088    // Err_FreeMismatches have already had their address described;  this is
   1089    // possible because we have the MC_Chunk on hand when the error is
   1090    // detected.  However, the address may be part of a user block, and if so
   1091    // we override the pre-determined description with a user block one.
   1092    case Err_FreeMismatch: {
   1093       tl_assert(extra && Block_Mallocd ==
   1094                 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
   1095       (void)client_block_maybe_describe( VG_(get_error_address)(err),
   1096                                         &extra->Err.FreeMismatch.ai );
   1097       return sizeof(MC_Error);
   1098    }
   1099 
   1100    default: VG_(tool_panic)("mc_update_extra: bad errkind");
   1101    }
   1102 }
   1103 
   1104 
   1105 static Bool client_block_maybe_describe( Addr a,
   1106                                          /*OUT*/AddrInfo* ai )
   1107 {
   1108    UWord      i;
   1109    CGenBlock* cgbs = NULL;
   1110    UWord      cgb_used = 0;
   1111 
   1112    MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   1113    if (cgbs == NULL)
   1114       tl_assert(cgb_used == 0);
   1115 
   1116    /* Perhaps it's a general block ? */
   1117    for (i = 0; i < cgb_used; i++) {
   1118       if (cgbs[i].start == 0 && cgbs[i].size == 0)
   1119          continue;
   1120       // Use zero as the redzone for client blocks.
   1121       if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
   1122          ai->tag = Addr_Block;
   1123          ai->Addr.Block.block_kind = Block_UserG;
   1124          ai->Addr.Block.block_desc = cgbs[i].desc;
   1125          ai->Addr.Block.block_szB  = cgbs[i].size;
   1126          ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(cgbs[i].start);
   1127          ai->Addr.Block.allocated_at = cgbs[i].where;
    1128          ai->Addr.Block.freed_at = VG_(null_ExeContext)();
   1129          return True;
   1130       }
   1131    }
   1132    return False;
   1133 }
   1134 
   1135 
   1136 static Bool mempool_block_maybe_describe( Addr a,
   1137                                           /*OUT*/AddrInfo* ai )
   1138 {
   1139    MC_Mempool* mp;
   1140    tl_assert( MC_(mempool_list) );
   1141 
   1142    VG_(HT_ResetIter)( MC_(mempool_list) );
   1143    while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
   1144       if (mp->chunks != NULL) {
   1145          MC_Chunk* mc;
   1146          VG_(HT_ResetIter)(mp->chunks);
   1147          while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
   1148             if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
   1149                ai->tag = Addr_Block;
   1150                ai->Addr.Block.block_kind = Block_MempoolChunk;
   1151                ai->Addr.Block.block_desc = "block";
   1152                ai->Addr.Block.block_szB  = mc->szB;
   1153                ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
   1154                ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
   1155                ai->Addr.Block.freed_at = MC_(freed_at)(mc);
   1156                return True;
   1157             }
   1158          }
   1159       }
   1160    }
   1161    return False;
   1162 }
   1163 
   1164 
   1165 /*------------------------------------------------------------*/
   1166 /*--- Suppressions                                         ---*/
   1167 /*------------------------------------------------------------*/
   1168 
   1169 typedef
   1170    enum {
   1171       ParamSupp,     // Bad syscall params
   1172       UserSupp,      // Errors arising from client-request checks
   1173       CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)
   1174 
   1175       // Undefined value errors of given size
   1176       Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,
   1177 
   1178       // Undefined value error in conditional.
   1179       CondSupp,
   1180 
   1181       // Unaddressable read/write attempt at given size
   1182       Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,
   1183 
   1184       JumpSupp,      // Jump to unaddressable target
   1185       FreeSupp,      // Invalid or mismatching free
   1186       OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
   1187       LeakSupp,      // Something to be suppressed in a leak check.
   1188       MempoolSupp,   // Memory pool suppression.
   1189    }
   1190    MC_SuppKind;
   1191 
   1192 Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
   1193 {
   1194    SuppKind skind;
   1195 
   1196    if      (VG_STREQ(name, "Param"))   skind = ParamSupp;
   1197    else if (VG_STREQ(name, "User"))    skind = UserSupp;
   1198    else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
   1199    else if (VG_STREQ(name, "Addr1"))   skind = Addr1Supp;
   1200    else if (VG_STREQ(name, "Addr2"))   skind = Addr2Supp;
   1201    else if (VG_STREQ(name, "Addr4"))   skind = Addr4Supp;
   1202    else if (VG_STREQ(name, "Addr8"))   skind = Addr8Supp;
   1203    else if (VG_STREQ(name, "Addr16"))  skind = Addr16Supp;
   1204    else if (VG_STREQ(name, "Jump"))    skind = JumpSupp;
   1205    else if (VG_STREQ(name, "Free"))    skind = FreeSupp;
   1206    else if (VG_STREQ(name, "Leak"))    skind = LeakSupp;
   1207    else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
   1208    else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
   1209    else if (VG_STREQ(name, "Cond"))    skind = CondSupp;
   1210    else if (VG_STREQ(name, "Value0"))  skind = CondSupp; /* backwards compat */
   1211    else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   1212    else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   1213    else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   1214    else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   1215    else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   1216    else
   1217       return False;
   1218 
   1219    VG_(set_supp_kind)(su, skind);
   1220    return True;
   1221 }
   1222 
   1223 typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra;
   1224 
   1225 struct _MC_LeakSuppExtra {
   1226    UInt match_leak_kinds;
   1227 
   1228    /* Maintains nr of blocks and bytes suppressed with this suppression
   1229       during the leak search identified by leak_search_gen.
   1230       blocks_suppressed and bytes_suppressed are reset to 0 when
   1231       used the first time during a leak search. */
   1232    SizeT blocks_suppressed;
   1233    SizeT bytes_suppressed;
   1234    UInt  leak_search_gen;
   1235 };
   1236 
   1237 Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
   1238                                         SizeT* nBufp, Int* lineno, Supp *su )
   1239 {
   1240    Bool eof;
   1241    Int i;
   1242 
   1243    if (VG_(get_supp_kind)(su) == ParamSupp) {
   1244       eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
   1245       if (eof) return False;
   1246       VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
   1247    } else if (VG_(get_supp_kind)(su) == LeakSupp) {
   1248       // We might have the optional match-leak-kinds line
   1249       MC_LeakSuppExtra* lse;
   1250       lse = VG_(malloc)("mc.resi.2", sizeof(MC_LeakSuppExtra));
   1251       lse->match_leak_kinds = RallS;
   1252       lse->blocks_suppressed = 0;
   1253       lse->bytes_suppressed = 0;
   1254       lse->leak_search_gen = 0;
   1255       VG_(set_supp_extra)(su, lse); // By default, all kinds will match.
   1256       eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
   1257       if (eof) return True; // old LeakSupp style, no match-leak-kinds line.
   1258       if (0 == VG_(strncmp)(*bufpp, "match-leak-kinds:", 17)) {
   1259          i = 17;
    1260          while ((*bufpp)[i] && VG_(isspace)((*bufpp)[i]))
   1261             i++;
   1262          if (!MC_(parse_leak_kinds)((*bufpp)+i, &lse->match_leak_kinds)) {
   1263             return False;
   1264          }
   1265       } else {
   1266          return False; // unknown extra line.
   1267       }
   1268    }
   1269    return True;
   1270 }
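
         /* Illustrative suppression entries accepted by the two branches
            above (the names and stack frames are made up):

               {
                  ignore-sendmsg-param
                  Memcheck:Param
                  socketcall.sendmsg(msg)
                  fun:sendmsg
               }

               {
                  ignore-libfoo-leaks
                  Memcheck:Leak
                  match-leak-kinds: definite,indirect
                  fun:malloc
                  obj:*libfoo*
               }

            For ParamSupp the extra line is the syscall parameter name, kept
            via VG_(set_supp_string); for LeakSupp the optional
            "match-leak-kinds:" line restricts which Reachedness states the
            suppression applies to (all kinds match when it is absent). */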
   1271 
   1272 Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
   1273 {
   1274    Int       su_szB;
   1275    MC_Error* extra = VG_(get_error_extra)(err);
   1276    ErrorKind ekind = VG_(get_error_kind )(err);
   1277 
   1278    switch (VG_(get_supp_kind)(su)) {
   1279       case ParamSupp:
   1280          return ((ekind == Err_RegParam || ekind == Err_MemParam)
   1281               && VG_STREQ(VG_(get_error_string)(err),
   1282                           VG_(get_supp_string)(su)));
   1283 
   1284       case UserSupp:
   1285          return (ekind == Err_User);
   1286 
   1287       case CoreMemSupp:
   1288          return (ekind == Err_CoreMem
   1289               && VG_STREQ(VG_(get_error_string)(err),
   1290                           VG_(get_supp_string)(su)));
   1291 
   1292       case Value1Supp: su_szB = 1; goto value_case;
   1293       case Value2Supp: su_szB = 2; goto value_case;
   1294       case Value4Supp: su_szB = 4; goto value_case;
   1295       case Value8Supp: su_szB = 8; goto value_case;
   1296       case Value16Supp:su_szB =16; goto value_case;
   1297       value_case:
   1298          return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
   1299 
   1300       case CondSupp:
   1301          return (ekind == Err_Cond);
   1302 
   1303       case Addr1Supp: su_szB = 1; goto addr_case;
   1304       case Addr2Supp: su_szB = 2; goto addr_case;
   1305       case Addr4Supp: su_szB = 4; goto addr_case;
   1306       case Addr8Supp: su_szB = 8; goto addr_case;
   1307       case Addr16Supp:su_szB =16; goto addr_case;
   1308       addr_case:
   1309          return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
   1310 
   1311       case JumpSupp:
   1312          return (ekind == Err_Jump);
   1313 
   1314       case FreeSupp:
   1315          return (ekind == Err_Free || ekind == Err_FreeMismatch);
   1316 
   1317       case OverlapSupp:
   1318          return (ekind == Err_Overlap);
   1319 
   1320       case LeakSupp:
   1321          if (ekind == Err_Leak) {
   1322             MC_LeakSuppExtra* lse = (MC_LeakSuppExtra*) VG_(get_supp_extra)(su);
   1323             if (lse->leak_search_gen != MC_(leak_search_gen)) {
   1324                // First time we see this suppression during this leak search.
   1325                // => reset the counters to 0.
   1326                lse->blocks_suppressed = 0;
   1327                lse->bytes_suppressed = 0;
   1328                lse->leak_search_gen = MC_(leak_search_gen);
   1329             }
   1330             return RiS(extra->Err.Leak.lr->key.state, lse->match_leak_kinds);
   1331          } else
   1332             return False;
   1333 
   1334       case MempoolSupp:
   1335          return (ekind == Err_IllegalMempool);
   1336 
   1337       default:
   1338          VG_(printf)("Error:\n"
   1339                      "  unknown suppression type %d\n",
   1340                      VG_(get_supp_kind)(su));
   1341          VG_(tool_panic)("unknown suppression type in "
   1342                          "MC_(error_matches_suppression)");
   1343    }
   1344 }
   1345 
   1346 const HChar* MC_(get_error_name) ( Error* err )
   1347 {
   1348    switch (VG_(get_error_kind)(err)) {
   1349    case Err_RegParam:       return "Param";
   1350    case Err_MemParam:       return "Param";
   1351    case Err_User:           return "User";
   1352    case Err_FreeMismatch:   return "Free";
   1353    case Err_IllegalMempool: return "Mempool";
   1354    case Err_Free:           return "Free";
   1355    case Err_Jump:           return "Jump";
   1356    case Err_CoreMem:        return "CoreMem";
   1357    case Err_Overlap:        return "Overlap";
   1358    case Err_Leak:           return "Leak";
   1359    case Err_Cond:           return "Cond";
   1360    case Err_Addr: {
   1361       MC_Error* extra = VG_(get_error_extra)(err);
   1362       switch ( extra->Err.Addr.szB ) {
   1363       case 1:               return "Addr1";
   1364       case 2:               return "Addr2";
   1365       case 4:               return "Addr4";
   1366       case 8:               return "Addr8";
   1367       case 16:              return "Addr16";
   1368       default:              VG_(tool_panic)("unexpected size for Addr");
   1369       }
   1370    }
   1371    case Err_Value: {
   1372       MC_Error* extra = VG_(get_error_extra)(err);
   1373       switch ( extra->Err.Value.szB ) {
   1374       case 1:               return "Value1";
   1375       case 2:               return "Value2";
   1376       case 4:               return "Value4";
   1377       case 8:               return "Value8";
   1378       case 16:              return "Value16";
   1379       default:              VG_(tool_panic)("unexpected size for Value");
   1380       }
   1381    }
   1382    default:                 VG_(tool_panic)("get_error_name: unexpected type");
   1383    }
   1384 }
   1385 
   1386 Bool MC_(get_extra_suppression_info) ( Error* err,
   1387                                        /*OUT*/HChar* buf, Int nBuf )
   1388 {
   1389    ErrorKind ekind = VG_(get_error_kind )(err);
   1390    tl_assert(buf);
   1391    tl_assert(nBuf >= 16); // stay sane
   1392    if (Err_RegParam == ekind || Err_MemParam == ekind) {
   1393       const HChar* errstr = VG_(get_error_string)(err);
   1394       tl_assert(errstr);
   1395       VG_(snprintf)(buf, nBuf-1, "%s", errstr);
   1396       return True;
   1397    } else if (Err_Leak == ekind) {
   1398       MC_Error* extra = VG_(get_error_extra)(err);
   1399       VG_(snprintf)
   1400          (buf, nBuf-1, "match-leak-kinds: %s",
   1401           pp_Reachedness_for_leak_kinds(extra->Err.Leak.lr->key.state));
   1402       return True;
   1403    } else {
   1404       return False;
   1405    }
   1406 }
   1407 
   1408 Bool MC_(print_extra_suppression_use) ( Supp *su,
   1409                                         /*OUT*/HChar *buf, Int nBuf )
   1410 {
   1411    if (VG_(get_supp_kind)(su) == LeakSupp) {
   1412       MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
   1413 
   1414       if (lse->leak_search_gen == MC_(leak_search_gen)
   1415           && lse->blocks_suppressed > 0) {
   1416          VG_(snprintf) (buf, nBuf-1,
   1417                         "suppressed: %'lu bytes in %'lu blocks",
   1418                         lse->bytes_suppressed,
   1419                         lse->blocks_suppressed);
   1420          return True;
   1421       } else
   1422          return False;
   1423    } else
   1424       return False;
   1425 }
   1426 
   1427 void MC_(update_extra_suppression_use) ( Error* err, Supp* su)
   1428 {
   1429    if (VG_(get_supp_kind)(su) == LeakSupp) {
   1430       MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
   1431       MC_Error* extra = VG_(get_error_extra)(err);
   1432 
    1433       tl_assert (lse->leak_search_gen == MC_(leak_search_gen));
   1434       lse->blocks_suppressed += extra->Err.Leak.lr->num_blocks;
   1435       lse->bytes_suppressed
   1436          += extra->Err.Leak.lr->szB + extra->Err.Leak.lr->indirect_szB;
   1437    }
   1438 }
   1439 
   1440 /*--------------------------------------------------------------------*/
   1441 /*--- end                                              mc_errors.c ---*/
   1442 /*--------------------------------------------------------------------*/
   1443