      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- Interface to LibVEX_Translate, and the SP-update pass        ---*/
      4 /*---                                                m_translate.c ---*/
      5 /*--------------------------------------------------------------------*/
      6 
      7 /*
      8    This file is part of Valgrind, a dynamic binary instrumentation
      9    framework.
     10 
     11    Copyright (C) 2000-2011 Julian Seward
     12       jseward (at) acm.org
     13 
     14    This program is free software; you can redistribute it and/or
     15    modify it under the terms of the GNU General Public License as
     16    published by the Free Software Foundation; either version 2 of the
     17    License, or (at your option) any later version.
     18 
     19    This program is distributed in the hope that it will be useful, but
     20    WITHOUT ANY WARRANTY; without even the implied warranty of
     21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     22    General Public License for more details.
     23 
     24    You should have received a copy of the GNU General Public License
     25    along with this program; if not, write to the Free Software
     26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     27    02111-1307, USA.
     28 
     29    The GNU General Public License is contained in the file COPYING.
     30 */
     31 
     32 #include "pub_core_basics.h"
     33 #include "pub_core_vki.h"
     34 #include "pub_core_aspacemgr.h"
     35 
     36 #include "pub_core_machine.h"    // VG_(fnptr_to_fnentry)
     37                                  // VG_(get_SP)
     38                                  // VG_(machine_get_VexArchInfo)
     39 #include "pub_core_libcbase.h"
     40 #include "pub_core_libcassert.h"
     41 #include "pub_core_libcprint.h"
     42 #include "pub_core_options.h"
     43 
     44 #include "pub_core_debuginfo.h"  // VG_(get_fnname_w_offset)
     45 #include "pub_core_redir.h"      // VG_(redir_do_lookup)
     46 
      47 #include "pub_core_signals.h"    // VG_(synth_fault_{perms,mapping})
     48 #include "pub_core_stacks.h"     // VG_(unknown_SP_update)()
     49 #include "pub_core_tooliface.h"  // VG_(tdict)
     50 
     51 #include "pub_core_translate.h"
     52 #include "pub_core_transtab.h"
     53 #include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
     54                                // VG_(run_a_noredir_translation__return_point)
     55 
     56 #include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
     57 #include "pub_core_threadstate.h"  // VexGuestArchState
     58 #include "pub_core_trampoline.h"   // VG_(ppctoc_magic_redirect_return_stub)
     59 
     60 #include "pub_core_execontext.h"  // VG_(make_depth_1_ExeContext_from_Addr)
     61 
     62 #include "pub_core_gdbserver.h"   // VG_(tool_instrument_then_gdbserver_if_needed)
     63 
     64 /*------------------------------------------------------------*/
     65 /*--- Stats                                                ---*/
     66 /*------------------------------------------------------------*/
     67 
     68 static UInt n_SP_updates_fast            = 0;
     69 static UInt n_SP_updates_generic_known   = 0;
     70 static UInt n_SP_updates_generic_unknown = 0;
     71 
     72 void VG_(print_translation_stats) ( void )
     73 {
     74    Char buf[7];
     75    UInt n_SP_updates = n_SP_updates_fast + n_SP_updates_generic_known
     76                                          + n_SP_updates_generic_unknown;
     77    VG_(percentify)(n_SP_updates_fast, n_SP_updates, 1, 6, buf);
     78    VG_(message)(Vg_DebugMsg,
     79       "translate:            fast SP updates identified: %'u (%s)\n",
     80       n_SP_updates_fast, buf );
     81 
     82    VG_(percentify)(n_SP_updates_generic_known, n_SP_updates, 1, 6, buf);
     83    VG_(message)(Vg_DebugMsg,
     84       "translate:   generic_known SP updates identified: %'u (%s)\n",
     85       n_SP_updates_generic_known, buf );
     86 
     87    VG_(percentify)(n_SP_updates_generic_unknown, n_SP_updates, 1, 6, buf);
     88    VG_(message)(Vg_DebugMsg,
     89       "translate: generic_unknown SP updates identified: %'u (%s)\n",
     90       n_SP_updates_generic_unknown, buf );
     91 }
     92 
     93 /*------------------------------------------------------------*/
     94 /*--- %SP-update pass                                      ---*/
     95 /*------------------------------------------------------------*/
     96 
     97 static Bool need_to_handle_SP_assignment(void)
     98 {
     99    return ( VG_(tdict).track_new_mem_stack_4   ||
    100             VG_(tdict).track_die_mem_stack_4   ||
    101             VG_(tdict).track_new_mem_stack_8   ||
    102             VG_(tdict).track_die_mem_stack_8   ||
    103             VG_(tdict).track_new_mem_stack_12  ||
    104             VG_(tdict).track_die_mem_stack_12  ||
    105             VG_(tdict).track_new_mem_stack_16  ||
    106             VG_(tdict).track_die_mem_stack_16  ||
    107             VG_(tdict).track_new_mem_stack_32  ||
    108             VG_(tdict).track_die_mem_stack_32  ||
    109             VG_(tdict).track_new_mem_stack_112 ||
    110             VG_(tdict).track_die_mem_stack_112 ||
    111             VG_(tdict).track_new_mem_stack_128 ||
    112             VG_(tdict).track_die_mem_stack_128 ||
    113             VG_(tdict).track_new_mem_stack_144 ||
    114             VG_(tdict).track_die_mem_stack_144 ||
    115             VG_(tdict).track_new_mem_stack_160 ||
    116             VG_(tdict).track_die_mem_stack_160 ||
    117             VG_(tdict).track_new_mem_stack     ||
    118             VG_(tdict).track_die_mem_stack     );
    119 }
    120 
    121 // - The SP aliases are held in an array which is used as a circular buffer.
    122 //   This misses very few constant updates of SP (ie. < 0.1%) while using a
    123 //   small, constant structure that will also never fill up and cause
    124 //   execution to abort.
    125 // - Unused slots have a .temp value of 'IRTemp_INVALID'.
    126 // - 'next_SP_alias_slot' is the index where the next alias will be stored.
    127 // - If the buffer fills, we circle around and start over-writing
    128 //   non-IRTemp_INVALID values.  This is rare, and the overwriting of a
     129 //   value that would have subsequently been used is even rarer.
    130 // - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
    131 //   The rest either all won't (if we haven't yet circled around) or all
    132 //   will (if we have circled around).
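         //
         // Purely as an illustration (the temp numbers are made up): after the
         // pass sees
         //    t5 = GET:I32(offset_SP)      it records the alias (t5, 0)
         //    t7 = Add32(t5, 0x10:I32)     it records the alias (t7, +16)
         // so a later PUT(offset_SP) = t7 can be recognised as "SP += 16", ie.
         // a 16-byte die_mem_stack event, without any further analysis.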
    133 
    134 typedef
    135    struct {
    136       IRTemp temp;
    137       Long   delta;
    138    }
    139    SP_Alias;
    140 
    141 // With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
    142 // And I've tested with smaller values and the wrap-around case works ok.
    143 #define N_ALIASES    32
    144 static SP_Alias SP_aliases[N_ALIASES];
    145 static Int      next_SP_alias_slot = 0;
    146 
    147 static void clear_SP_aliases(void)
    148 {
    149    Int i;
    150    for (i = 0; i < N_ALIASES; i++) {
    151       SP_aliases[i].temp  = IRTemp_INVALID;
    152       SP_aliases[i].delta = 0;
    153    }
    154    next_SP_alias_slot = 0;
    155 }
    156 
    157 static void add_SP_alias(IRTemp temp, Long delta)
    158 {
    159    vg_assert(temp != IRTemp_INVALID);
    160    SP_aliases[ next_SP_alias_slot ].temp  = temp;
    161    SP_aliases[ next_SP_alias_slot ].delta = delta;
    162    next_SP_alias_slot++;
    163    if (N_ALIASES == next_SP_alias_slot) next_SP_alias_slot = 0;
    164 }
    165 
     166 static Bool get_SP_delta(IRTemp temp, Long* delta)
    167 {
    168    Int i;      // i must be signed!
    169    vg_assert(IRTemp_INVALID != temp);
    170    // Search backwards between current buffer position and the start.
    171    for (i = next_SP_alias_slot-1; i >= 0; i--) {
    172       if (temp == SP_aliases[i].temp) {
    173          *delta = SP_aliases[i].delta;
    174          return True;
    175       }
    176    }
    177    // Search backwards between the end and the current buffer position.
    178    for (i = N_ALIASES-1; i >= next_SP_alias_slot; i--) {
    179       if (temp == SP_aliases[i].temp) {
    180          *delta = SP_aliases[i].delta;
    181          return True;
    182       }
    183    }
    184    return False;
    185 }
    186 
    187 static void update_SP_aliases(Long delta)
    188 {
    189    Int i;
    190    for (i = 0; i < N_ALIASES; i++) {
    191       if (SP_aliases[i].temp == IRTemp_INVALID) {
    192          return;
    193       }
    194       SP_aliases[i].delta += delta;
    195    }
    196 }
    197 
    198 /* Given a guest IP, get an origin tag for a 1-element stack trace,
    199    and wrap it up in an IR atom that can be passed as the origin-tag
    200    value for a stack-adjustment helper function. */
    201 static IRExpr* mk_ecu_Expr ( Addr64 guest_IP )
    202 {
    203    UInt ecu;
    204    ExeContext* ec
    205       = VG_(make_depth_1_ExeContext_from_Addr)( (Addr)guest_IP );
    206    vg_assert(ec);
    207    ecu = VG_(get_ECU_from_ExeContext)( ec );
    208    vg_assert(VG_(is_plausible_ECU)(ecu));
    209    /* This is always safe to do, since ecu is only 32 bits, and
    210       HWord is 32 or 64. */
    211    return mkIRExpr_HWord( (HWord)ecu );
    212 }
    213 
    214 /* When gdbserver is activated, the translation of a block must
    215    first be done by the tool function, then followed by a pass
    216    which (if needed) instruments the code for gdbserver.
    217 */
    218 static
    219 IRSB* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure* closureV,
    220                                                  IRSB*              sb_in,
    221                                                  VexGuestLayout*    layout,
    222                                                  VexGuestExtents*   vge,
    223                                                  IRType             gWordTy,
    224                                                  IRType             hWordTy )
    225 {
    226    return VG_(instrument_for_gdbserver_if_needed)
    227       (VG_(tdict).tool_instrument (closureV,
    228                                    sb_in,
    229                                    layout,
    230                                    vge,
    231                                    gWordTy,
    232                                    hWordTy),
    233        layout,
    234        vge,
    235        gWordTy,
    236        hWordTy);
    237 }
    238 
    239 /* For tools that want to know about SP changes, this pass adds
    240    in the appropriate hooks.  We have to do it after the tool's
    241    instrumentation, so the tool doesn't have to worry about the C calls
    242    it adds in, and we must do it before register allocation because
    243    spilled temps make it much harder to work out the SP deltas.
     244    This is done with Vex's "second instrumentation" pass.
    245 
    246    Basically, we look for GET(SP)/PUT(SP) pairs and track constant
    247    increments/decrements of SP between them.  (This requires tracking one or
    248    more "aliases", which are not exact aliases but instead are tempregs
    249    whose value is equal to the SP's plus or minus a known constant.)
    250    If all the changes to SP leading up to a PUT(SP) are by known, small
    251    constants, we can do a specific call to eg. new_mem_stack_4, otherwise
    252    we fall back to the case that handles an unknown SP change.
    253 
    254    There is some extra complexity to deal correctly with updates to
    255    only parts of SP.  Bizarre, but it has been known to happen.
    256 */
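         /* A minimal sketch of the common case, for a 32-bit guest (the temp
            numbers and exact IR are illustrative only; which helper gets called
            depends on which track_* entries the tool has registered):
        
               t1 = GET:I32(offset_SP)     -- alias (t1, 0)
               t2 = Sub32(t1, 0x4:I32)     -- alias (t2, -4)
               PUT(offset_SP) = t2         -- delta is -4, so a dirty call to
                                              track_new_mem_stack_4(t2) is
                                              emitted just before the PUT.
        
            If the delta is not one of the specially-handled sizes, or the value
            being PUT is not a tracked tmp, we fall into the generic case, which
            calls VG_(unknown_SP_update) with the old and new SP values. */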
    257 static
    258 IRSB* vg_SP_update_pass ( void*             closureV,
    259                           IRSB*             sb_in,
    260                           VexGuestLayout*   layout,
    261                           VexGuestExtents*  vge,
    262                           IRType            gWordTy,
    263                           IRType            hWordTy )
    264 {
    265    Int         i, j, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
    266    Int         first_SP, last_SP, first_Put, last_Put;
    267    IRDirty     *dcall, *d;
    268    IRStmt*     st;
    269    IRExpr*     e;
    270    IRRegArray* descr;
    271    IRType      typeof_SP;
    272    Long        delta, con;
    273 
    274    /* Set up stuff for tracking the guest IP */
    275    Bool   curr_IP_known = False;
    276    Addr64 curr_IP       = 0;
    277 
    278    /* Set up BB */
    279    IRSB* bb     = emptyIRSB();
    280    bb->tyenv    = deepCopyIRTypeEnv(sb_in->tyenv);
    281    bb->next     = deepCopyIRExpr(sb_in->next);
    282    bb->jumpkind = sb_in->jumpkind;
    283 
    284    delta = 0;
    285 
    286    sizeof_SP = layout->sizeof_SP;
    287    offset_SP = layout->offset_SP;
    288    typeof_SP = sizeof_SP==4 ? Ity_I32 : Ity_I64;
    289    vg_assert(sizeof_SP == 4 || sizeof_SP == 8);
    290 
    291    /* --- Start of #defines --- */
    292 
    293 #  define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
    294 #  define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
    295 
    296 #  define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
    297 
    298 #  define GET_CONST(con)                                                \
    299        (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32)                        \
    300                      : (Long)(con->Ico.U64))
    301 
    302 #  define DO_NEW(syze, tmpp)                                            \
    303       do {                                                              \
    304          Bool vanilla, w_ecu;                                           \
    305          vg_assert(curr_IP_known);                                      \
    306          vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze;       \
    307          w_ecu   = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
    308          vg_assert(!(vanilla && w_ecu)); /* can't have both */          \
    309          if (!(vanilla || w_ecu))                                       \
    310             goto generic;                                               \
    311                                                                         \
    312          /* I don't know if it's really necessary to say that the */    \
    313          /* call reads the stack pointer.  But anyway, we do. */        \
    314          if (w_ecu) {                                                   \
    315             dcall = unsafeIRDirty_0_N(                                  \
    316                        2/*regparms*/,                                   \
    317                        "track_new_mem_stack_" #syze "_w_ECU",           \
    318                        VG_(fnptr_to_fnentry)(                           \
    319                           VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
    320                        mkIRExprVec_2(IRExpr_RdTmp(tmpp),                \
    321                                      mk_ecu_Expr(curr_IP))              \
    322                     );                                                  \
    323          } else {                                                       \
    324             dcall = unsafeIRDirty_0_N(                                  \
    325                        1/*regparms*/,                                   \
    326                        "track_new_mem_stack_" #syze ,                   \
    327                        VG_(fnptr_to_fnentry)(                           \
    328                           VG_(tdict).track_new_mem_stack_##syze ),      \
    329                        mkIRExprVec_1(IRExpr_RdTmp(tmpp))                \
    330                     );                                                  \
    331          }                                                              \
    332          dcall->nFxState = 1;                                           \
    333          dcall->fxState[0].fx     = Ifx_Read;                           \
    334          dcall->fxState[0].offset = layout->offset_SP;                  \
    335          dcall->fxState[0].size   = layout->sizeof_SP;                  \
    336                                                                         \
    337          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
    338                                                                         \
    339          tl_assert(syze > 0);                                           \
    340          update_SP_aliases(syze);                                       \
    341                                                                         \
    342          n_SP_updates_fast++;                                           \
    343                                                                         \
    344       } while (0)
    345 
    346 #  define DO_DIE(syze, tmpp)                                            \
    347       do {                                                              \
    348          if (!VG_(tdict).track_die_mem_stack_##syze)                    \
    349             goto generic;                                               \
    350                                                                         \
    351          /* I don't know if it's really necessary to say that the */    \
    352          /* call reads the stack pointer.  But anyway, we do. */        \
    353          dcall = unsafeIRDirty_0_N(                                     \
    354                     1/*regparms*/,                                      \
    355                     "track_die_mem_stack_" #syze,                       \
    356                     VG_(fnptr_to_fnentry)(                              \
    357                        VG_(tdict).track_die_mem_stack_##syze ),         \
    358                     mkIRExprVec_1(IRExpr_RdTmp(tmpp))                   \
    359                  );                                                     \
    360          dcall->nFxState = 1;                                           \
    361          dcall->fxState[0].fx     = Ifx_Read;                           \
    362          dcall->fxState[0].offset = layout->offset_SP;                  \
    363          dcall->fxState[0].size   = layout->sizeof_SP;                  \
    364                                                                         \
    365          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
    366                                                                         \
    367          tl_assert(syze > 0);                                           \
    368          update_SP_aliases(-(syze));                                    \
    369                                                                         \
    370          n_SP_updates_fast++;                                           \
    371                                                                         \
    372       } while (0)
    373 
    374    /* --- End of #defines --- */
    375 
    376    clear_SP_aliases();
    377 
    378    for (i = 0; i <  sb_in->stmts_used; i++) {
    379 
    380       st = sb_in->stmts[i];
    381 
    382       if (st->tag == Ist_IMark) {
    383          curr_IP_known = True;
    384          curr_IP       = st->Ist.IMark.addr;
    385       }
    386 
    387       /* t = Get(sp):   curr = t, delta = 0 */
    388       if (st->tag != Ist_WrTmp) goto case2;
    389       e = st->Ist.WrTmp.data;
    390       if (e->tag != Iex_Get)              goto case2;
    391       if (e->Iex.Get.offset != offset_SP) goto case2;
    392       if (e->Iex.Get.ty != typeof_SP)     goto case2;
    393       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    394       add_SP_alias(st->Ist.WrTmp.tmp, 0);
    395       addStmtToIRSB( bb, st );
    396       continue;
    397 
    398      case2:
    399       /* t' = curr +/- const:   curr = t',  delta +=/-= const */
    400       if (st->tag != Ist_WrTmp) goto case3;
    401       e = st->Ist.WrTmp.data;
    402       if (e->tag != Iex_Binop) goto case3;
    403       if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
    404       if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
    405       if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
    406       if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
    407       con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
    408       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    409       if (IS_ADD(e->Iex.Binop.op)) {
    410          add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
    411       } else {
    412          add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
    413       }
    414       addStmtToIRSB( bb, st );
    415       continue;
    416 
    417      case3:
    418       /* t' = curr:   curr = t' */
    419       if (st->tag != Ist_WrTmp) goto case4;
    420       e = st->Ist.WrTmp.data;
    421       if (e->tag != Iex_RdTmp) goto case4;
    422       if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
    423       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    424       add_SP_alias(st->Ist.WrTmp.tmp, delta);
    425       addStmtToIRSB( bb, st );
    426       continue;
    427 
    428      case4:
    429       /* Put(sp) = curr */
    430       /* More generally, we must correctly handle a Put which writes
    431          any part of SP, not just the case where all of SP is
    432          written. */
    433       if (st->tag != Ist_Put) goto case5;
    434       first_SP  = offset_SP;
    435       last_SP   = first_SP + sizeof_SP - 1;
    436       first_Put = st->Ist.Put.offset;
    437       last_Put  = first_Put
    438                   + sizeofIRType( typeOfIRExpr( bb->tyenv, st->Ist.Put.data ))
    439                   - 1;
    440       vg_assert(first_SP <= last_SP);
    441       vg_assert(first_Put <= last_Put);
    442 
    443       if (last_Put < first_SP || last_SP < first_Put)
    444          goto case5; /* no overlap */
    445 
    446       if (st->Ist.Put.data->tag == Iex_RdTmp
    447           && get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
    448          IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
    449          /* Why should the following assertion hold?  Because any
     450             alias added by add_SP_alias must be of a temporary which
    451             has the same type as typeof_SP, and whose value is a Get
    452             at exactly offset_SP of size typeof_SP.  Each call to
     453             add_SP_alias is immediately preceded by an assertion that
    454             we are putting in a binding for a correctly-typed
    455             temporary. */
    456          vg_assert( typeOfIRTemp(bb->tyenv, tttmp) == typeof_SP );
    457          /* From the same type-and-offset-correctness argument, if
     458             we found a useable alias, it must be for an "exact" write of SP. */
    459          vg_assert(first_SP == first_Put);
    460          vg_assert(last_SP == last_Put);
    461          switch (delta) {
    462             case    0:                      addStmtToIRSB(bb,st); continue;
    463             case    4: DO_DIE(  4,  tttmp); addStmtToIRSB(bb,st); continue;
    464             case   -4: DO_NEW(  4,  tttmp); addStmtToIRSB(bb,st); continue;
    465             case    8: DO_DIE(  8,  tttmp); addStmtToIRSB(bb,st); continue;
    466             case   -8: DO_NEW(  8,  tttmp); addStmtToIRSB(bb,st); continue;
    467             case   12: DO_DIE(  12, tttmp); addStmtToIRSB(bb,st); continue;
    468             case  -12: DO_NEW(  12, tttmp); addStmtToIRSB(bb,st); continue;
    469             case   16: DO_DIE(  16, tttmp); addStmtToIRSB(bb,st); continue;
    470             case  -16: DO_NEW(  16, tttmp); addStmtToIRSB(bb,st); continue;
    471             case   32: DO_DIE(  32, tttmp); addStmtToIRSB(bb,st); continue;
    472             case  -32: DO_NEW(  32, tttmp); addStmtToIRSB(bb,st); continue;
    473             case  112: DO_DIE( 112, tttmp); addStmtToIRSB(bb,st); continue;
    474             case -112: DO_NEW( 112, tttmp); addStmtToIRSB(bb,st); continue;
    475             case  128: DO_DIE( 128, tttmp); addStmtToIRSB(bb,st); continue;
    476             case -128: DO_NEW( 128, tttmp); addStmtToIRSB(bb,st); continue;
    477             case  144: DO_DIE( 144, tttmp); addStmtToIRSB(bb,st); continue;
    478             case -144: DO_NEW( 144, tttmp); addStmtToIRSB(bb,st); continue;
    479             case  160: DO_DIE( 160, tttmp); addStmtToIRSB(bb,st); continue;
    480             case -160: DO_NEW( 160, tttmp); addStmtToIRSB(bb,st); continue;
    481             default:
    482                /* common values for ppc64: 144 128 160 112 176 */
    483                n_SP_updates_generic_known++;
    484                goto generic;
    485          }
    486       } else {
    487          /* Deal with an unknown update to SP.  We're here because
    488             either:
    489             (1) the Put does not exactly cover SP; it is a partial update.
    490                 Highly unlikely, but has been known to happen for 16-bit
    491                 Windows apps running on Wine, doing 16-bit adjustments to
    492                 %sp.
    493             (2) the Put does exactly cover SP, but we are unable to
    494                 determine how the value relates to the old SP.  In any
    495                 case, we cannot assume that the Put.data value is a tmp;
    496                 we must assume it can be anything allowed in flat IR (tmp
    497                 or const).
    498          */
    499          IRTemp  old_SP;
    500          n_SP_updates_generic_unknown++;
    501 
    502          // Nb: if all is well, this generic case will typically be
    503          // called something like every 1000th SP update.  If it's more than
    504          // that, the above code may be missing some cases.
    505         generic:
    506          /* Pass both the old and new SP values to this helper.  Also,
    507             pass an origin tag, even if it isn't needed. */
    508          old_SP = newIRTemp(bb->tyenv, typeof_SP);
    509          addStmtToIRSB(
    510             bb,
    511             IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
    512          );
    513 
    514          /* Now we know what the old value of SP is.  But knowing the new
    515             value is a bit tricky if there is a partial write. */
    516          if (first_Put == first_SP && last_Put == last_SP) {
    517            /* The common case, an exact write to SP.  So st->Ist.Put.data
    518               does hold the new value; simple. */
    519             vg_assert(curr_IP_known);
    520             dcall = unsafeIRDirty_0_N(
    521                        3/*regparms*/,
    522                        "VG_(unknown_SP_update)",
    523                        VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
    524                        mkIRExprVec_3( IRExpr_RdTmp(old_SP), st->Ist.Put.data,
    525                                       mk_ecu_Expr(curr_IP) )
    526                     );
    527             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
    528             /* don't forget the original assignment */
    529             addStmtToIRSB( bb, st );
    530          } else {
    531             /* We have a partial update to SP.  We need to know what
    532                the new SP will be, and hand that to the helper call,
    533                but when the helper call happens, SP must hold the
    534                value it had before the update.  Tricky.
    535                Therefore use the following kludge:
    536                1. do the partial SP update (Put)
    537                2. Get the new SP value into a tmp, new_SP
    538                3. Put old_SP
    539                4. Call the helper
    540                5. Put new_SP
    541             */
    542             IRTemp new_SP;
    543             /* 1 */
    544             addStmtToIRSB( bb, st );
    545             /* 2 */
    546             new_SP = newIRTemp(bb->tyenv, typeof_SP);
    547             addStmtToIRSB(
    548                bb,
    549                IRStmt_WrTmp( new_SP, IRExpr_Get(offset_SP, typeof_SP) )
    550             );
    551             /* 3 */
    552             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(old_SP) ));
    553             /* 4 */
    554             vg_assert(curr_IP_known);
    555             dcall = unsafeIRDirty_0_N(
    556                        3/*regparms*/,
    557                        "VG_(unknown_SP_update)",
    558                        VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
    559                        mkIRExprVec_3( IRExpr_RdTmp(old_SP),
    560                                       IRExpr_RdTmp(new_SP),
    561                                       mk_ecu_Expr(curr_IP) )
    562                     );
    563             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
    564             /* 5 */
    565             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(new_SP) ));
    566          }
    567 
    568          /* Forget what we already know. */
    569          clear_SP_aliases();
    570 
    571          /* If this is a Put of a tmp that exactly updates SP,
    572             start tracking aliases against this tmp. */
    573 
    574          if (first_Put == first_SP && last_Put == last_SP
    575              && st->Ist.Put.data->tag == Iex_RdTmp) {
    576             vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.Put.data->Iex.RdTmp.tmp)
    577                        == typeof_SP );
    578             add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
    579          }
    580          continue;
    581       }
    582 
    583      case5:
    584       /* PutI or Dirty call which overlaps SP: complain.  We can't
    585          deal with SP changing in weird ways (well, we can, but not at
    586          this time of night).  */
    587       if (st->tag == Ist_PutI) {
    588          descr = st->Ist.PutI.descr;
    589          minoff_ST = descr->base;
    590          maxoff_ST = descr->base
    591                      + descr->nElems * sizeofIRType(descr->elemTy) - 1;
    592          if (!(offset_SP > maxoff_ST
    593                || (offset_SP + sizeof_SP - 1) < minoff_ST))
    594             goto complain;
    595       }
    596       if (st->tag == Ist_Dirty) {
    597          d = st->Ist.Dirty.details;
    598          for (j = 0; j < d->nFxState; j++) {
    599             minoff_ST = d->fxState[j].offset;
    600             maxoff_ST = d->fxState[j].offset + d->fxState[j].size - 1;
    601             if (d->fxState[j].fx == Ifx_Read || d->fxState[j].fx == Ifx_None)
    602                continue;
    603             if (!(offset_SP > maxoff_ST
    604                   || (offset_SP + sizeof_SP - 1) < minoff_ST))
    605                goto complain;
    606          }
    607       }
    608 
    609       /* well, not interesting.  Just copy and keep going. */
    610       addStmtToIRSB( bb, st );
    611 
    612    } /* for (i = 0; i < sb_in->stmts_used; i++) */
    613 
    614    return bb;
    615 
    616   complain:
    617    VG_(core_panic)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
    618 
    619 #undef IS_ADD
    620 #undef IS_SUB
    621 #undef IS_ADD_OR_SUB
    622 #undef GET_CONST
    623 #undef DO_NEW
    624 #undef DO_DIE
    625 }
    626 
    627 /*------------------------------------------------------------*/
    628 /*--- Main entry point for the JITter.                     ---*/
    629 /*------------------------------------------------------------*/
    630 
    631 /* Extra comments re self-checking translations and self-modifying
    632    code.  (JRS 14 Oct 05).
    633 
    634    There are 3 modes:
    635    (1) no checking: all code assumed to be not self-modifying
    636    (2) partial: known-problematic situations get a self-check
    637    (3) full checking: all translations get a self-check
    638 
    639    As currently implemented, the default is (2).  (3) is always safe,
    640    but very slow.  (1) works mostly, but fails for gcc nested-function
    641    code which uses trampolines on the stack; this situation is
    642    detected and handled by (2).
    643 
    644    ----------
    645 
    646    A more robust and transparent solution, which is not currently
    647    implemented, is a variant of (2): if a translation is made from an
    648    area which aspacem says does not have 'w' permission, then it can
    649    be non-self-checking.  Otherwise, it needs a self-check.
    650 
    651    This is complicated by Vex's basic-block chasing.  If a self-check
    652    is requested, then Vex will not chase over basic block boundaries
    653    (it's too complex).  However there is still a problem if it chases
    654    from a non-'w' area into a 'w' area.
    655 
    656    I think the right thing to do is:
    657 
    658    - if a translation request starts in a 'w' area, ask for a
    659      self-checking translation, and do not allow any chasing (make
    660      chase_into_ok return False).  Note that the latter is redundant
    661      in the sense that Vex won't chase anyway in this situation.
    662 
    663    - if a translation request starts in a non-'w' area, do not ask for
    664      a self-checking translation.  However, do not allow chasing (as
    665      determined by chase_into_ok) to go into a 'w' area.
    666 
     667    The result of this is that all code inside 'w' areas is
     668    self-checking.
    669 
    670    To complete the trick, there is a caveat: we must watch the
    671    client's mprotect calls.  If pages are changed from non-'w' to 'w'
    672    then we should throw away all translations which intersect the
    673    affected area, so as to force them to be redone with self-checks.
    674 
    675    ----------
    676 
    677    The above outlines the conditions under which bb chasing is allowed
    678    from a self-modifying-code point of view.  There are other
    679    situations pertaining to function redirection in which it is
    680    necessary to disallow chasing, but those fall outside the scope of
    681    this comment.
    682 */
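         /* For concreteness, the not-currently-implemented scheme sketched above
            would look roughly like this (pseudo-code; names are illustrative):
        
               seg = VG_(am_find_nsegment)( start of translation request );
               if (seg && seg->hasW) {
                  request a self-checking translation;
                  chase_into_ok(..) always returns False;
               } else {
                  request a non-self-checking translation;
                  chase_into_ok(dst) returns False if dst's segment has 'w';
               }
        
            together with discarding all translations intersecting any range
            that a client mprotect upgrades to 'w'. */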
    683 
    684 
    685 /* Vex dumps the final code in here.  Then we can copy it off
    686    wherever we like. */
    687 /* 60000: should agree with assertion in VG_(add_to_transtab) in
    688    m_transtab.c. */
    689 #define N_TMPBUF 60000
    690 static UChar tmpbuf[N_TMPBUF];
    691 
    692 
    693 /* Function pointers we must supply to LibVEX in order that it
    694    can bomb out and emit messages under Valgrind's control. */
    695 __attribute__ ((noreturn))
    696 static
    697 void failure_exit ( void )
    698 {
    699    LibVEX_ShowAllocStats();
    700    VG_(core_panic)("LibVEX called failure_exit().");
    701 }
    702 
    703 static
    704 void log_bytes ( HChar* bytes, Int nbytes )
    705 {
    706   Int i;
    707   for (i = 0; i < nbytes-3; i += 4)
    708      VG_(printf)("%c%c%c%c", bytes[i], bytes[i+1], bytes[i+2], bytes[i+3]);
    709   for (; i < nbytes; i++)
    710      VG_(printf)("%c", bytes[i]);
    711 }
    712 
    713 
    714 /* --------- Various helper functions for translation --------- */
    715 
    716 /* Look for reasons to disallow making translations from the given
    717    segment. */
    718 
    719 static Bool translations_allowable_from_seg ( NSegment const* seg )
    720 {
    721 #  if defined(VGA_x86) || defined(VGA_s390x)
    722    Bool allowR = True;
    723 #  else
    724    Bool allowR = False;
    725 #  endif
    726    return seg != NULL
    727           && (seg->kind == SkAnonC || seg->kind == SkFileC || seg->kind == SkShmC)
    728           && (seg->hasX || (seg->hasR && allowR));
    729 }
    730 
    731 
    732 /* Produce a bitmask stating which of the supplied extents needs a
    733    self-check.  See documentation of
    734    VexTranslateArgs::needs_self_check for more details about the
    735    return convention. */
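         /* In short: bit i of the result is set iff extent i needs a check.
            For example, with vge->n_used == 3, returning 2 (binary 010)
            requests a self-check for the second extent only, 0 requests none,
            and 7 requests one for all three. */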
    736 
    737 static UInt needs_self_check ( void* closureV,
    738                                VexGuestExtents* vge )
    739 {
    740    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
    741    UInt i, bitset;
    742 
    743    vg_assert(vge->n_used >= 1 && vge->n_used <= 3);
    744    bitset = 0;
    745 
    746    for (i = 0; i < vge->n_used; i++) {
    747       Bool  check = False;
    748       Addr  addr  = (Addr)vge->base[i];
    749       SizeT len   = (SizeT)vge->len[i];
    750       NSegment const* segA = NULL;
    751 
    752 #     if defined(VGO_darwin)
    753       // GrP fixme hack - dyld i386 IMPORT gets rewritten.
    754       // To really do this correctly, we'd need to flush the
    755       // translation cache whenever a segment became +WX.
    756       segA = VG_(am_find_nsegment)(addr);
    757       if (segA && segA->hasX && segA->hasW)
    758          check = True;
    759 #     endif
    760 
    761       if (!check) {
    762          switch (VG_(clo_smc_check)) {
    763             case Vg_SmcNone:
    764                /* never check (except as per Darwin hack above) */
    765                break;
    766             case Vg_SmcAll:
    767                /* always check */
    768                check = True;
    769                break;
    770             case Vg_SmcStack: {
    771                /* check if the address is in the same segment as this
    772                   thread's stack pointer */
    773                Addr sp = VG_(get_SP)(closure->tid);
    774                if (!segA) {
    775                   segA = VG_(am_find_nsegment)(addr);
    776                }
    777                NSegment const* segSP = VG_(am_find_nsegment)(sp);
    778                if (segA && segSP && segA == segSP)
    779                   check = True;
    780                break;
    781             }
    782             case Vg_SmcAllNonFile: {
    783                /* check if any part of the extent is not in a
    784                   file-mapped segment */
    785                if (!segA) {
    786                   segA = VG_(am_find_nsegment)(addr);
    787                }
    788                if (segA && segA->kind == SkFileC && segA->start <= addr
    789                    && (len == 0 || addr + len <= segA->end + 1)) {
    790                   /* in a file-mapped segment; skip the check */
    791                } else {
    792                   check = True;
    793                }
    794                break;
    795             }
    796             default:
    797                vg_assert(0);
    798          }
    799       }
    800 
    801       if (check)
    802          bitset |= (1 << i);
    803    }
    804 
    805    return bitset;
    806 }
    807 
    808 
    809 /* This is a callback passed to LibVEX_Translate.  It stops Vex from
    810    chasing into function entry points that we wish to redirect.
    811    Chasing across them obviously defeats the redirect mechanism, with
    812    bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
    813 */
    814 static Bool chase_into_ok ( void* closureV, Addr64 addr64 )
    815 {
    816    Addr               addr    = (Addr)addr64;
    817    NSegment const*    seg     = VG_(am_find_nsegment)(addr);
    818 
    819    /* Work through a list of possibilities why we might not want to
    820       allow a chase. */
    821 
    822    /* Destination not in a plausible segment? */
    823    if (!translations_allowable_from_seg(seg))
    824       goto dontchase;
    825 
    826    /* Destination is redirected? */
    827    if (addr != VG_(redir_do_lookup)(addr, NULL))
    828       goto dontchase;
    829 
    830 #  if defined(VG_PLAT_USES_PPCTOC)
    831    /* This needs to be at the start of its own block.  Don't chase. Re
    832       ULong_to_Ptr, be careful to ensure we only compare 32 bits on a
    833       32-bit target.*/
    834    if (ULong_to_Ptr(addr64)
    835        == (void*)&VG_(ppctoc_magic_redirect_return_stub))
    836       goto dontchase;
    837 #  endif
    838 
    839    /* overly conservative, but .. don't chase into the distinguished
    840       address that m_transtab uses as an empty-slot marker for
    841       VG_(tt_fast). */
    842    if (addr == TRANSTAB_BOGUS_GUEST_ADDR)
    843       goto dontchase;
    844 
    845 #  if defined(VGA_s390x)
    846    /* Never chase into an EX instruction. Generating IR for EX causes
    847       a round-trip through the scheduler including VG_(discard_translations).
    848       And that's expensive as shown by perf/tinycc.c:
    849       Chasing into EX increases the number of EX translations from 21 to
    850       102666 causing a 7x runtime increase for "none" and a 3.2x runtime
    851       increase for memcheck. */
    852    if (((UChar *)ULong_to_Ptr(addr))[0] == 0x44 ||   /* EX */
    853        ((UChar *)ULong_to_Ptr(addr))[0] == 0xC6)     /* EXRL */
    854      goto dontchase;
    855 #  endif
    856 
    857    /* well, ok then.  go on and chase. */
    858    return True;
    859 
    860    vg_assert(0);
    861    /*NOTREACHED*/
    862 
    863   dontchase:
    864    if (0) VG_(printf)("not chasing into 0x%lx\n", addr);
    865    return False;
    866 }
    867 
    868 
    869 /* --------------- helpers for with-TOC platforms --------------- */
    870 
    871 /* NOTE: with-TOC platforms are: ppc64-linux. */
    872 
    873 static IRExpr* mkU64 ( ULong n ) {
    874    return IRExpr_Const(IRConst_U64(n));
    875 }
    876 static IRExpr* mkU32 ( UInt n ) {
    877    return IRExpr_Const(IRConst_U32(n));
    878 }
    879 
    880 #if defined(VG_PLAT_USES_PPCTOC)
    881 static IRExpr* mkU8 ( UChar n ) {
    882    return IRExpr_Const(IRConst_U8(n));
    883 }
    884 static IRExpr* narrowTo32 ( IRTypeEnv* tyenv, IRExpr* e ) {
    885    if (typeOfIRExpr(tyenv, e) == Ity_I32) {
    886       return e;
    887    } else {
    888       vg_assert(typeOfIRExpr(tyenv, e) == Ity_I64);
    889       return IRExpr_Unop(Iop_64to32, e);
    890    }
    891 }
    892 
    893 /* Generate code to push word-typed expression 'e' onto this thread's
    894    redir stack, checking for stack overflow and generating code to
    895    bomb out if so. */
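         /* Conceptually (ignoring the IR plumbing), the generated code is:
        
               t1 = guest_REDIR_SP + 1;
               if (t1 >= stack_size)        -- signed comparison
                  fail (EmWarn_PPC64_redir_overflow);
               guest_REDIR_SP = t1;
               guest_REDIR_STACK[t1] = e;
         */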
    896 
    897 static void gen_PUSH ( IRSB* bb, IRExpr* e )
    898 {
    899    IRRegArray* descr;
    900    IRTemp      t1;
    901    IRExpr*     one;
    902 
    903 #  if defined(VGP_ppc64_linux)
    904    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
    905    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
    906    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
    907    Int    offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
    908    Bool   is64             = True;
    909    IRType ty_Word          = Ity_I64;
    910    IROp   op_CmpNE         = Iop_CmpNE64;
    911    IROp   op_Sar           = Iop_Sar64;
    912    IROp   op_Sub           = Iop_Sub64;
    913    IROp   op_Add           = Iop_Add64;
    914    IRExpr*(*mkU)(ULong)    = mkU64;
    915    vg_assert(VG_WORDSIZE == 8);
    916 #  else
    917    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
    918    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
    919    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
    920    Int    offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
    921    Bool   is64             = False;
    922    IRType ty_Word          = Ity_I32;
    923    IROp   op_CmpNE         = Iop_CmpNE32;
    924    IROp   op_Sar           = Iop_Sar32;
    925    IROp   op_Sub           = Iop_Sub32;
    926    IROp   op_Add           = Iop_Add32;
    927    IRExpr*(*mkU)(UInt)     = mkU32;
    928    vg_assert(VG_WORDSIZE == 4);
    929 #  endif
    930 
    931    vg_assert(sizeof(void*) == VG_WORDSIZE);
    932    vg_assert(sizeof(Word)  == VG_WORDSIZE);
    933    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
    934 
    935    descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
    936    t1    = newIRTemp( bb->tyenv, ty_Word );
    937    one   = mkU(1);
    938 
    939    vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);
    940 
    941    /* t1 = guest_REDIR_SP + 1 */
    942    addStmtToIRSB(
    943       bb,
    944       IRStmt_WrTmp(
    945          t1,
    946          IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
    947       )
    948    );
    949 
    950    /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
    951       The destination (0) is a bit bogus but it doesn't matter since
    952       this is an unrecoverable error and will lead to Valgrind
    953       shutting down.  _EMWARN is set regardless - that's harmless
     954       since it only has a meaning if the exit is taken. */
    955    addStmtToIRSB(
    956       bb,
    957       IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_overflow))
    958    );
    959    addStmtToIRSB(
    960       bb,
    961       IRStmt_Exit(
    962          IRExpr_Binop(
    963             op_CmpNE,
    964             IRExpr_Binop(
    965                op_Sar,
    966                IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
    967                mkU8(8 * VG_WORDSIZE - 1)
    968             ),
    969             mkU(0)
    970          ),
    971          Ijk_EmFail,
    972          is64 ? IRConst_U64(0) : IRConst_U32(0)
    973       )
    974    );
    975 
    976    /* guest_REDIR_SP = t1 */
    977    addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));
    978 
    979    /* guest_REDIR_STACK[t1+0] = e */
    980    /* PutI/GetI have I32-typed indexes regardless of guest word size */
    981    addStmtToIRSB(
    982       bb,
    983       IRStmt_PutI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)
    984    );
    985 }
    986 
    987 
    988 /* Generate code to pop a word-sized value from this thread's redir
    989    stack, binding it to a new temporary, which is returned.  As with
    990    gen_PUSH, an overflow check is also performed. */
     991    gen_PUSH, a stack-limit check (here, for underflow) is also performed. */
    992 static IRTemp gen_POP ( IRSB* bb )
    993 {
    994 #  if defined(VGP_ppc64_linux)
    995    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
    996    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
    997    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
    998    Int    offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
    999    Bool   is64             = True;
   1000    IRType ty_Word          = Ity_I64;
   1001    IROp   op_CmpNE         = Iop_CmpNE64;
   1002    IROp   op_Sar           = Iop_Sar64;
   1003    IROp   op_Sub           = Iop_Sub64;
   1004    IRExpr*(*mkU)(ULong)    = mkU64;
   1005 #  else
   1006    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
   1007    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
   1008    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
   1009    Int    offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
   1010    Bool   is64             = False;
   1011    IRType ty_Word          = Ity_I32;
   1012    IROp   op_CmpNE         = Iop_CmpNE32;
   1013    IROp   op_Sar           = Iop_Sar32;
   1014    IROp   op_Sub           = Iop_Sub32;
   1015    IRExpr*(*mkU)(UInt)     = mkU32;
   1016 #  endif
   1017 
   1018    IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
   1019    IRTemp      t1    = newIRTemp( bb->tyenv, ty_Word );
   1020    IRTemp      res   = newIRTemp( bb->tyenv, ty_Word );
   1021    IRExpr*     one   = mkU(1);
   1022 
   1023    vg_assert(sizeof(void*) == VG_WORDSIZE);
   1024    vg_assert(sizeof(Word)  == VG_WORDSIZE);
   1025    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
   1026 
   1027    /* t1 = guest_REDIR_SP */
   1028    addStmtToIRSB(
   1029       bb,
   1030       IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
   1031    );
   1032 
   1033    /* Bomb out if t1 < 0.  Same comments as gen_PUSH apply. */
   1034    addStmtToIRSB(
   1035       bb,
   1036       IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_underflow))
   1037    );
   1038    addStmtToIRSB(
   1039       bb,
   1040       IRStmt_Exit(
   1041          IRExpr_Binop(
   1042             op_CmpNE,
   1043             IRExpr_Binop(
   1044                op_Sar,
   1045                IRExpr_RdTmp(t1),
   1046                mkU8(8 * VG_WORDSIZE - 1)
   1047             ),
   1048             mkU(0)
   1049          ),
   1050          Ijk_EmFail,
   1051          is64 ? IRConst_U64(0) : IRConst_U32(0)
   1052       )
   1053    );
   1054 
   1055    /* res = guest_REDIR_STACK[t1+0] */
   1056    /* PutI/GetI have I32-typed indexes regardless of guest word size */
   1057    addStmtToIRSB(
   1058       bb,
   1059       IRStmt_WrTmp(
   1060          res,
   1061          IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
   1062       )
   1063    );
   1064 
   1065    /* guest_REDIR_SP = t1-1 */
   1066    addStmtToIRSB(
   1067       bb,
   1068       IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
   1069    );
   1070 
   1071    return res;
   1072 }
   1073 
   1074 /* Generate code to push LR and R2 onto this thread's redir stack,
   1075    then set R2 to the new value (which is the TOC pointer to be used
   1076    for the duration of the replacement function, as determined by
   1077    m_debuginfo), and set LR to the magic return stub, so we get to
    1078    intercept the return and restore R2 and LR to the values saved
   1079    here. */
   1080 
   1081 static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr64 new_R2_value )
   1082 {
   1083 #  if defined(VGP_ppc64_linux)
   1084    Addr64 bogus_RA  = (Addr64)&VG_(ppctoc_magic_redirect_return_stub);
   1085    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   1086    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   1087    gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
   1088    gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
   1089    addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU64( bogus_RA )) );
   1090    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
   1091 
   1092 #  else
   1093 #    error Platform is not TOC-afflicted, fortunately
   1094 #  endif
   1095 }
   1096 
   1097 static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
   1098 {
   1099 #  if defined(VGP_ppc64_linux)
   1100    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   1101    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   1102    IRTemp old_R2    = newIRTemp( bb->tyenv, Ity_I64 );
   1103    IRTemp old_LR    = newIRTemp( bb->tyenv, Ity_I64 );
   1104    /* Restore R2 */
   1105    old_R2 = gen_POP( bb );
   1106    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
   1107    /* Restore LR */
   1108    old_LR = gen_POP( bb );
   1109    addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
   1110    /* Branch to LR */
   1111    /* re boring, we arrived here precisely because a wrapped fn did a
   1112       blr (hence Ijk_Ret); so we should just mark this jump as Boring,
   1113       else one _Call will have resulted in two _Rets. */
   1114    bb->jumpkind = Ijk_Boring;
   1115    bb->next = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
   1116 
   1117 #  else
   1118 #    error Platform is not TOC-afflicted, fortunately
   1119 #  endif
   1120 }
   1121 
   1122 static
   1123 Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
   1124 {
   1125    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1126    /* Since we're creating the entire IRSB right here, give it a
   1127       proper IMark, as it won't get one any other way, and cachegrind
   1128       will barf if it doesn't have one (fair enough really). */
   1129    addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4, 0 ) );
   1130    /* Generate the magic sequence:
   1131          pop R2 from hidden stack
   1132          pop LR from hidden stack
   1133          goto LR
   1134    */
   1135    gen_pop_R2_LR_then_bLR(bb);
   1136    return True; /* True == this is the entire BB; don't disassemble any
   1137                    real insns into it - just hand it directly to
   1138                    optimiser/instrumenter/backend. */
   1139 }
   1140 #endif
   1141 
   1142 /* --------------- END helpers for with-TOC platforms --------------- */
   1143 
   1144 
   1145 /* This is the IR preamble generator used for replacement
   1146    functions.  It adds code to set the guest_NRADDR{_GPR2} to zero
   1147    (technically not necessary, but facilitates detecting mixups in
   1148    which a replacement function has been erroneously declared using
   1149    VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
   1150    using VG_WRAP_FUNCTION_Z{U,Z}).
   1151 
    1152    On with-TOC platforms the following hacks are also done: LR and R2 are
   1153    pushed onto a hidden stack, R2 is set to the correct value for the
   1154    replacement function, and LR is set to point at the magic
   1155    return-stub address.  Setting LR causes the return of the
   1156    wrapped/redirected function to lead to our magic return stub, which
   1157    restores LR and R2 from said stack and returns for real.
   1158 
   1159    VG_(get_StackTrace_wrk) understands that the LR value may point to
   1160    the return stub address, and that in that case it can get the real
   1161    LR value from the hidden stack instead. */
   1162 static
   1163 Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
   1164 {
   1165    Int nraddr_szB
   1166       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   1167    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   1168    vg_assert(nraddr_szB == VG_WORDSIZE);
   1169    addStmtToIRSB(
   1170       bb,
   1171       IRStmt_Put(
   1172          offsetof(VexGuestArchState,guest_NRADDR),
   1173          nraddr_szB == 8 ? mkU64(0) : mkU32(0)
   1174       )
   1175    );
   1176 #  if defined(VG_PLAT_USES_PPCTOC)
   1177    { VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1178      addStmtToIRSB(
   1179         bb,
   1180         IRStmt_Put(
   1181            offsetof(VexGuestArchState,guest_NRADDR_GPR2),
   1182            VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
   1183         )
   1184      );
   1185      gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
   1186    }
   1187 #  endif
   1188    return False;
   1189 }
   1190 
   1191 /* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
   1192    address).  This is needed for function wrapping - so the wrapper
   1193    can read _NRADDR and find the address of the function being
   1194    wrapped.  On toc-afflicted platforms we must also snarf r2. */
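         /* For orientation, the client-side counterpart (only a sketch; the
            macros are the ones mentioned above, defined in valgrind.h): a
            wrapper declared with VG_WRAP_FUNCTION_ZU typically begins with
        
               OrigFn fn;
               VALGRIND_GET_ORIG_FN(fn);
        
            which reads back the guest_NRADDR (and, on with-TOC platforms,
            guest_NRADDR_GPR2) values stored by the preamble below. */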
   1195 static
   1196 Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
   1197 {
   1198    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1199    Int nraddr_szB
   1200       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   1201    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   1202    vg_assert(nraddr_szB == VG_WORDSIZE);
   1203    addStmtToIRSB(
   1204       bb,
   1205       IRStmt_Put(
   1206          offsetof(VexGuestArchState,guest_NRADDR),
   1207          nraddr_szB == 8
   1208             ? IRExpr_Const(IRConst_U64( closure->nraddr ))
   1209             : IRExpr_Const(IRConst_U32( (UInt)closure->nraddr ))
   1210       )
   1211    );
   1212 #  if defined(VGP_ppc64_linux)
   1213    addStmtToIRSB(
   1214       bb,
   1215       IRStmt_Put(
   1216          offsetof(VexGuestArchState,guest_NRADDR_GPR2),
   1217          IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
   1218                     VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
   1219       )
   1220    );
   1221    gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
   1222 #  endif
   1223    return False;
   1224 }
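
        /* Client-side view, for reference (a sketch using the public macros
           from valgrind.h; consult that header for the definitive spelling;
           the function name 'foo' and the soname pattern are illustrative):
           a wrapper declared via the VG_WRAP_FUNCTION_Z{U,Z} scheme recovers
           the value planted in guest_NRADDR above with VALGRIND_GET_ORIG_FN
           and then calls through to the un-redirected function:

              int I_WRAP_SONAME_FNNAME_ZU(libcZdsoZa,foo) ( int x )
              {
                 int    r;
                 OrigFn fn;
                 VALGRIND_GET_ORIG_FN(fn);   // reads guest_NRADDR set above
                 CALL_FN_W_W(r, fn, x);      // call the original 'foo'
                 return r;
              }
        */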
   1225 
   1226 /* --- Helpers to do with PPC related stack redzones. --- */
   1227 
   1228 __attribute__((unused))
   1229 static Bool const_True ( Addr64 guest_addr )
   1230 {
   1231    return True;
   1232 }
   1233 
   1234 /* --------------- main translation function --------------- */
   1235 
   1236 /* Note: see comments at top of m_redir.c for the Big Picture on how
   1237    redirections are managed. */
   1238 
   1239 typedef
   1240    enum {
   1241       /* normal translation, redir neither requested nor inhibited */
   1242       T_Normal,
   1243       /* redir translation, function-wrap (set _NRADDR) style */
   1244       T_Redir_Wrap,
   1245       /* redir translation, replacement (don't set _NRADDR) style */
   1246       T_Redir_Replace,
   1247       /* a translation in which redir is specifically disallowed */
   1248       T_NoRedir
   1249    }
   1250    T_Kind;
   1251 
   1252 /* Translate the basic block beginning at NRADDR, and add it to the
   1253    translation cache & translation table, unless
   1254    DEBUGGING_TRANSLATION is true, in which case the call is being done
   1255    for debugging purposes, so (a) throw away the translation once it
   1256    is made, and (b) produce a load of debugging output.  If
   1257    ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
   1258    and also, put the resulting translation into the no-redirect tt/tc
   1259    instead of the normal one.
   1260 
   1261    TID is the identity of the thread requesting this translation.
   1262 */
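
        /* Typical call site (a sketch, from memory, of how the scheduler
           drives this on a translation-cache miss; m_scheduler/scheduler.c
           is the authoritative caller):

              if (!VG_(search_transtab)( NULL, ip, True )) {
                 if (VG_(translate)( tid, ip, False, 0, bbs_done, True )) {
                    // translation now sits in the TT/TC; look it up again
                    // and dispatch to it
                 }
              }
        */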
   1263 
   1264 Bool VG_(translate) ( ThreadId tid,
   1265                       Addr64   nraddr,
   1266                       Bool     debugging_translation,
   1267                       Int      debugging_verbosity,
   1268                       ULong    bbs_done,
   1269                       Bool     allow_redirection )
   1270 {
   1271    Addr64             addr;
   1272    T_Kind             kind;
   1273    Int                tmpbuf_used, verbosity, i;
   1274    Bool (*preamble_fn)(void*,IRSB*);
   1275    VexArch            vex_arch;
   1276    VexArchInfo        vex_archinfo;
   1277    VexAbiInfo         vex_abiinfo;
   1278    VexGuestExtents    vge;
   1279    VexTranslateArgs   vta;
   1280    VexTranslateResult tres;
   1281    VgCallbackClosure  closure;
   1282 
   1283    /* Make sure Vex is initialised right. */
   1284 
   1285    static Bool vex_init_done = False;
   1286 
   1287    if (!vex_init_done) {
   1288       LibVEX_Init ( &failure_exit, &log_bytes,
   1289                     1,     /* debug_paranoia */
   1290                     False, /* valgrind support */
   1291                     &VG_(clo_vex_control) );
   1292       vex_init_done = True;
   1293    }
   1294 
   1295    /* Establish the translation kind and actual guest address to
   1296       start from.  Sets (addr,kind). */
   1297    if (allow_redirection) {
   1298       Bool isWrap;
   1299       Addr64 tmp = VG_(redir_do_lookup)( nraddr, &isWrap );
   1300       if (tmp == nraddr) {
   1301          /* no redirection found */
   1302          addr = nraddr;
   1303          kind = T_Normal;
   1304       } else {
   1305          /* found a redirect */
   1306          addr = tmp;
   1307          kind = isWrap ? T_Redir_Wrap : T_Redir_Replace;
   1308       }
   1309    } else {
   1310       addr = nraddr;
   1311       kind = T_NoRedir;
   1312    }
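
           /* Example (a sketch): under Memcheck, a client call to libc's
              'malloc' typically resolves here with nraddr = the libc
              symbol's address, addr = the address of the core's replacement
              from vg_replace_malloc.c, and kind == T_Redir_Replace. */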
   1313 
   1314    /* Established: (nraddr, addr, kind) */
   1315 
   1316    /* Printing redirection info. */
   1317 
   1318    if ((kind == T_Redir_Wrap || kind == T_Redir_Replace)
   1319        && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
   1320       Bool ok;
   1321       Char name1[64] = "";
   1322       Char name2[64] = "";
   1323       name1[0] = name2[0] = 0;
   1324       ok = VG_(get_fnname_w_offset)(nraddr, name1, 64);
   1325       if (!ok) VG_(strcpy)(name1, "???");
   1326       ok = VG_(get_fnname_w_offset)(addr, name2, 64);
   1327       if (!ok) VG_(strcpy)(name2, "???");
   1328       VG_(message)(Vg_DebugMsg,
   1329                    "REDIR: 0x%llx (%s) redirected to 0x%llx (%s)\n",
   1330                    nraddr, name1,
   1331                    addr, name2 );
   1332    }
   1333 
   1334    if (!debugging_translation)
   1335       VG_TRACK( pre_mem_read, Vg_CoreTranslate,
   1336                               tid, "(translator)", addr, 1 );
   1337 
   1338    /* If doing any code printing, print a basic block start marker */
   1339    if (VG_(clo_trace_flags) || debugging_translation) {
   1340       Char fnname[64] = "UNKNOWN_FUNCTION";
   1341       VG_(get_fnname_w_offset)(addr, fnname, 64);
   1342       const UChar* objname = "UNKNOWN_OBJECT";
   1343       OffT         objoff  = 0;
   1344       DebugInfo*   di      = VG_(find_DebugInfo)( addr );
   1345       if (di) {
   1346          objname = VG_(DebugInfo_get_filename)(di);
   1347          objoff  = addr - VG_(DebugInfo_get_text_bias)(di);
   1348       }
   1349       vg_assert(objname);
   1350       VG_(printf)(
   1351          "==== SB %d (exec'd %lld) [tid %d] 0x%llx %s %s+0x%llx\n",
   1352          VG_(get_bbs_translated)(), bbs_done, (Int)tid, addr,
   1353          fnname, objname, (ULong)objoff
   1354       );
   1355    }
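
           /* With tracing enabled, the printf above yields one line per
              superblock, of the shape (values illustrative only):

                 ==== SB 1499 (exec'd 1498) [tid 1] 0x4016050 strlen /lib64/ld-2.x.so+0x16050
           */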
   1356 
   1357    /* Are we allowed to translate here? */
   1358 
   1359    { /* BEGIN new scope specially for 'seg' */
   1360    NSegment const* seg = VG_(am_find_nsegment)(addr);
   1361 
   1362    if ( (!translations_allowable_from_seg(seg))
   1363         || addr == TRANSTAB_BOGUS_GUEST_ADDR ) {
   1364       if (VG_(clo_trace_signals))
   1365          VG_(message)(Vg_DebugMsg, "translations not allowed here (0x%llx)"
   1366                                    " - throwing SEGV\n", addr);
   1367       /* U R busted, sonny.  Place your hands on your head and step
   1368          away from the orig_addr. */
   1369       /* Code address is bad - deliver a signal instead */
   1370       if (seg != NULL) {
   1371          /* There's some kind of segment at the requested place, but we
   1372             aren't allowed to execute code here. */
   1373          if (debugging_translation)
   1374             VG_(printf)("translations not allowed here (segment not executable)"
   1375                         " (0x%llx)\n", addr);
   1376          else
   1377             VG_(synth_fault_perms)(tid, addr);
   1378       } else {
   1379          /* There is no segment at all; we are attempting to execute in
   1380             the middle of nowhere. */
   1381          if (debugging_translation)
   1382             VG_(printf)("translations not allowed here (no segment)"
   1383                         " (0x%llx)\n", addr);
   1384          else
   1385             VG_(synth_fault_mapping)(tid, addr);
   1386       }
   1387       return False;
   1388    }
   1389 
   1390    /* Set verbosity: debugging_verbosity for a debug translation, else
              VG_(clo_trace_flags) once at least VG_(clo_trace_notbelow) SBs
              have been translated. */
   1391    verbosity = 0;
   1392    if (debugging_translation) {
   1393       verbosity = debugging_verbosity;
   1394    }
   1395    else
   1396    if ( (VG_(clo_trace_flags) > 0
   1397         && VG_(get_bbs_translated)() >= VG_(clo_trace_notbelow) )) {
   1398       verbosity = VG_(clo_trace_flags);
   1399    }
   1400 
   1401    /* Figure out which preamble-mangling callback to send. */
   1402    preamble_fn = NULL;
   1403    if (kind == T_Redir_Replace)
   1404       preamble_fn = mk_preamble__set_NRADDR_to_zero;
   1405    else
   1406    if (kind == T_Redir_Wrap)
   1407       preamble_fn = mk_preamble__set_NRADDR_to_nraddr;
   1408 
   1409 #  if defined(VG_PLAT_USES_PPCTOC)
   1410    if (ULong_to_Ptr(nraddr)
   1411        == (void*)&VG_(ppctoc_magic_redirect_return_stub)) {
   1412       /* If entering the special return stub, this means a wrapped or
   1413          redirected function is returning.  Make this translation one
   1414          which restores R2 and LR from the thread's hidden redir
   1415          stack, and branch to the (restored) link register, thereby
   1416          really causing the function to return. */
   1417       vg_assert(kind == T_Normal);
   1418       vg_assert(nraddr == addr);
   1419       preamble_fn = mk_preamble__ppctoc_magic_return_stub;
   1420    }
   1421 #  endif
   1422 
   1423    /* ------ Actually do the translation. ------ */
   1424    tl_assert2(VG_(tdict).tool_instrument,
   1425               "you forgot to set VgToolInterface function 'tool_instrument'");
   1426 
   1427    /* Get the CPU info established at startup. */
   1428    VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
   1429 
   1430    /* Set up 'abiinfo' structure with stuff Vex needs to know about
   1431       the guest and host ABIs. */
   1432 
   1433    LibVEX_default_VexAbiInfo( &vex_abiinfo );
   1434    vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
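           /* The redzone is the ABI-defined scratch area below the stack
              pointer that code may use without moving SP (128 bytes on
              amd64 per the SysV ABI, none on x86).  Telling Vex its size
              lets the generated code and the SP-tracking machinery agree on
              which bytes below SP are still live.  (A summary from memory;
              VG_STACK_REDZONE_SZB itself is defined per-platform in the
              core headers.) */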
   1435 
   1436 #  if defined(VGP_amd64_linux)
   1437    vex_abiinfo.guest_amd64_assume_fs_is_zero  = True;
   1438 #  endif
   1439 #  if defined(VGP_amd64_darwin)
   1440    vex_abiinfo.guest_amd64_assume_gs_is_0x60  = True;
   1441 #  endif
   1442 #  if defined(VGP_ppc32_linux)
   1443    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = False;
   1444    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = NULL;
   1445    vex_abiinfo.host_ppc32_regalign_int64_args = True;
   1446 #  endif
   1447 #  if defined(VGP_ppc64_linux)
   1448    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = True;
   1449    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
   1450    vex_abiinfo.host_ppc_calls_use_fndescrs    = True;
   1451 #  endif
   1452 
   1453    /* Set up closure args. */
   1454    closure.tid    = tid;
   1455    closure.nraddr = nraddr;
   1456    closure.readdr = addr;
   1457 
   1458    /* Set up args for LibVEX_Translate. */
   1459    vta.arch_guest       = vex_arch;
   1460    vta.archinfo_guest   = vex_archinfo;
   1461    vta.arch_host        = vex_arch;
   1462    vta.archinfo_host    = vex_archinfo;
   1463    vta.abiinfo_both     = vex_abiinfo;
   1464    vta.guest_bytes      = (UChar*)ULong_to_Ptr(addr);
   1465    vta.guest_bytes_addr = (Addr64)addr;
   1466    vta.callback_opaque  = (void*)&closure;
   1467    vta.chase_into_ok    = chase_into_ok;
   1468    vta.preamble_function = preamble_fn;
   1469    vta.guest_extents    = &vge;
   1470    vta.host_bytes       = tmpbuf;
   1471    vta.host_bytes_size  = N_TMPBUF;
   1472    vta.host_bytes_used  = &tmpbuf_used;
   1473    { /* At this point we have to reconcile Vex's view of the
   1474         instrumentation callback - which takes a void* first argument
   1475         - with Valgrind's view, in which the first arg is a
   1476         VgCallbackClosure*.  Hence the following longwinded casts.
   1477         They are entirely legal but longwinded so as to maximise the
   1478         chance of the C typechecker picking up any type snafus. */
   1479      IRSB*(*f)(VgCallbackClosure*,
   1480                IRSB*,VexGuestLayout*,VexGuestExtents*,
   1481                IRType,IRType)
   1482         = VG_(clo_vgdb) != Vg_VgdbNo
   1483              ? tool_instrument_then_gdbserver_if_needed
   1484              : VG_(tdict).tool_instrument;
   1485      IRSB*(*g)(void*,
   1486                IRSB*,VexGuestLayout*,VexGuestExtents*,
   1487                IRType,IRType)
   1488        = (IRSB*(*)(void*,IRSB*,VexGuestLayout*,VexGuestExtents*,IRType,IRType))f;
   1489      vta.instrument1    = g;
   1490    }
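
           /* For reference, the simplest tool-side instrumenter (the shape
              used by the 'none' tool's nl_instrument; shown here as a
              sketch) just returns the superblock unchanged:

                 static IRSB* nl_instrument ( VgCallbackClosure* closure,
                                              IRSB* sbIn,
                                              VexGuestLayout* layout,
                                              VexGuestExtents* vge,
                                              IRType gWordTy, IRType hWordTy )
                 {
                    return sbIn;
                 }
           */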
   1491    /* No need for type kludgery here. */
   1492    vta.instrument2      = need_to_handle_SP_assignment()
   1493                              ? vg_SP_update_pass
   1494                              : NULL;
   1495    vta.finaltidy        = VG_(needs).final_IR_tidy_pass
   1496                              ? VG_(tdict).tool_final_IR_tidy_pass
   1497                              : NULL;
   1498    vta.needs_self_check = needs_self_check;
   1499    vta.traceflags       = verbosity;
   1500 
   1501    /* Set up the dispatch-return info.  For archs without a link
   1502       register, vex generates a jump back to the specified dispatch
   1503       address.  Else, it just generates a branch-to-LR. */
   1504 
   1505 #  if defined(VGA_x86) || defined(VGA_amd64)
   1506    if (!allow_redirection) {
   1507       /* It's a no-redir translation.  Will be run with the
   1508          nonstandard dispatcher VG_(run_a_noredir_translation) and so
   1509          needs a nonstandard return point. */
   1510       vta.dispatch_assisted
   1511          = (void*) &VG_(run_a_noredir_translation__return_point);
   1512       vta.dispatch_unassisted
   1513          = vta.dispatch_assisted;
   1514    }
   1515    else
   1516    if (VG_(clo_profile_flags) > 0) {
   1517       /* normal translation; although we're profiling. */
   1518       vta.dispatch_assisted
   1519          = (void*) &VG_(run_innerloop__dispatch_assisted_profiled);
   1520       vta.dispatch_unassisted
   1521          = (void*) &VG_(run_innerloop__dispatch_unassisted_profiled);
   1522    }
   1523    else {
   1524       /* normal translation and we're not profiling (the normal case) */
   1525       vta.dispatch_assisted
   1526          = (void*) &VG_(run_innerloop__dispatch_assisted_unprofiled);
   1527       vta.dispatch_unassisted
   1528          = (void*) &VG_(run_innerloop__dispatch_unassisted_unprofiled);
   1529    }
   1530 
   1531 #  elif defined(VGA_ppc32) || defined(VGA_ppc64) \
   1532         || defined(VGA_arm) || defined(VGA_s390x)
   1533    /* See comment in libvex.h.  This target uses a
   1534       return-to-link-register scheme to get back to the dispatcher, so
   1535       both fields are NULL. */
   1536    vta.dispatch_assisted   = NULL;
   1537    vta.dispatch_unassisted = NULL;
   1538 
   1539 #  else
   1540 #    error "Unknown arch"
   1541 #  endif
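
           /* Broadly (libvex.h has the authoritative description): generated
              code uses the 'unassisted' entry point for ordinary returns to
              the dispatcher, and the 'assisted' one when the scheduler must
              intervene (syscalls, client requests and similar).  On the
              link-register targets above, both collapse into a plain
              branch-to-LR, hence the NULLs. */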
   1542 
   1543    /* Sheesh.  Finally, actually _do_ the translation! */
   1544    tres = LibVEX_Translate ( &vta );
   1545 
   1546    vg_assert(tres.status == VexTransOK);
   1547    vg_assert(tres.n_sc_extents >= 0 && tres.n_sc_extents <= 3);
   1548    vg_assert(tmpbuf_used <= N_TMPBUF);
   1549    vg_assert(tmpbuf_used > 0);
   1550 
   1551    /* Tell aspacem of all segments that have had translations taken
   1552       from them.  Optimisation: don't re-look up vge.base[0] since seg
   1553       should already point to it. */
   1554 
   1555    vg_assert( vge.base[0] == (Addr64)addr );
   1556    /* set 'translations taken from this segment' flag */
   1557    VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
   1558    } /* END new scope specially for 'seg' */
   1559 
   1560    for (i = 1; i < vge.n_used; i++) {
   1561       NSegment const* seg
   1562          = VG_(am_find_nsegment)( vge.base[i] );
   1563       /* set 'translations taken from this segment' flag */
   1564       VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
   1565    }
   1566 
   1567    /* Copy data at trans_addr into the translation cache. */
   1568    vg_assert(tmpbuf_used > 0 && tmpbuf_used < 65536);
   1569 
   1570    // If debugging, don't do anything with the translated block;  we
   1571    // only did this for the debugging output produced along the way.
   1572    if (!debugging_translation) {
   1573 
   1574       if (kind != T_NoRedir) {
   1575           // Put it into the normal TT/TC structures.  This is the
   1576           // normal case.
   1577 
   1578           // Note that we use nraddr (the non-redirected address), not
   1579           // addr, which might have been changed by the redirection
   1580           VG_(add_to_transtab)( &vge,
   1581                                 nraddr,
   1582                                 (Addr)(&tmpbuf[0]),
   1583                                 tmpbuf_used,
   1584                                 tres.n_sc_extents > 0 );
   1585       } else {
   1586           VG_(add_to_unredir_transtab)( &vge,
   1587                                         nraddr,
   1588                                         (Addr)(&tmpbuf[0]),
   1589                                         tmpbuf_used );
   1590       }
   1591    }
   1592 
   1593    return True;
   1594 }
   1595 
   1596 /*--------------------------------------------------------------------*/
   1597 /*--- end                                                          ---*/
   1598 /*--------------------------------------------------------------------*/
   1599