      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- Interface to LibVEX_Translate, and the SP-update pass        ---*/
      4 /*---                                                m_translate.c ---*/
      5 /*--------------------------------------------------------------------*/
      6 
      7 /*
      8    This file is part of Valgrind, a dynamic binary instrumentation
      9    framework.
     10 
     11    Copyright (C) 2000-2012 Julian Seward
     12       jseward (at) acm.org
     13 
     14    This program is free software; you can redistribute it and/or
     15    modify it under the terms of the GNU General Public License as
     16    published by the Free Software Foundation; either version 2 of the
     17    License, or (at your option) any later version.
     18 
     19    This program is distributed in the hope that it will be useful, but
     20    WITHOUT ANY WARRANTY; without even the implied warranty of
     21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     22    General Public License for more details.
     23 
     24    You should have received a copy of the GNU General Public License
     25    along with this program; if not, write to the Free Software
     26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     27    02111-1307, USA.
     28 
     29    The GNU General Public License is contained in the file COPYING.
     30 */
     31 
     32 #include "pub_core_basics.h"
     33 #include "pub_core_vki.h"
     34 #include "pub_core_aspacemgr.h"
     35 
     36 #include "pub_core_machine.h"    // VG_(fnptr_to_fnentry)
     37                                  // VG_(get_SP)
     38                                  // VG_(machine_get_VexArchInfo)
     39 #include "pub_core_libcbase.h"
     40 #include "pub_core_libcassert.h"
     41 #include "pub_core_libcprint.h"
     42 #include "pub_core_options.h"
     43 
     44 #include "pub_core_debuginfo.h"  // VG_(get_fnname_w_offset)
     45 #include "pub_core_redir.h"      // VG_(redir_do_lookup)
     46 
      47 #include "pub_core_signals.h"    // VG_(synth_fault_{perms,mapping})
     48 #include "pub_core_stacks.h"     // VG_(unknown_SP_update)()
     49 #include "pub_core_tooliface.h"  // VG_(tdict)
     50 
     51 #include "pub_core_translate.h"
     52 #include "pub_core_transtab.h"
     53 #include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
     54                                // VG_(run_a_noredir_translation__return_point)
     55 
     56 #include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
     57 #include "pub_core_threadstate.h"  // VexGuestArchState
     58 #include "pub_core_trampoline.h"   // VG_(ppctoc_magic_redirect_return_stub)
     59 
     60 #include "pub_core_execontext.h"  // VG_(make_depth_1_ExeContext_from_Addr)
     61 
     62 #include "pub_core_gdbserver.h"   // VG_(tool_instrument_then_gdbserver_if_needed)
     63 
     64 /*------------------------------------------------------------*/
     65 /*--- Stats                                                ---*/
     66 /*------------------------------------------------------------*/
     67 
     68 static UInt n_SP_updates_fast            = 0;
     69 static UInt n_SP_updates_generic_known   = 0;
     70 static UInt n_SP_updates_generic_unknown = 0;
     71 
     72 void VG_(print_translation_stats) ( void )
     73 {
     74    Char buf[7];
     75    UInt n_SP_updates = n_SP_updates_fast + n_SP_updates_generic_known
     76                                          + n_SP_updates_generic_unknown;
     77    VG_(percentify)(n_SP_updates_fast, n_SP_updates, 1, 6, buf);
     78    VG_(message)(Vg_DebugMsg,
     79       "translate:            fast SP updates identified: %'u (%s)\n",
     80       n_SP_updates_fast, buf );
     81 
     82    VG_(percentify)(n_SP_updates_generic_known, n_SP_updates, 1, 6, buf);
     83    VG_(message)(Vg_DebugMsg,
     84       "translate:   generic_known SP updates identified: %'u (%s)\n",
     85       n_SP_updates_generic_known, buf );
     86 
     87    VG_(percentify)(n_SP_updates_generic_unknown, n_SP_updates, 1, 6, buf);
     88    VG_(message)(Vg_DebugMsg,
     89       "translate: generic_unknown SP updates identified: %'u (%s)\n",
     90       n_SP_updates_generic_unknown, buf );
     91 }
     92 
     93 /*------------------------------------------------------------*/
     94 /*--- %SP-update pass                                      ---*/
     95 /*------------------------------------------------------------*/
     96 
     97 static Bool need_to_handle_SP_assignment(void)
     98 {
     99    return ( VG_(tdict).track_new_mem_stack_4   ||
    100             VG_(tdict).track_die_mem_stack_4   ||
    101             VG_(tdict).track_new_mem_stack_8   ||
    102             VG_(tdict).track_die_mem_stack_8   ||
    103             VG_(tdict).track_new_mem_stack_12  ||
    104             VG_(tdict).track_die_mem_stack_12  ||
    105             VG_(tdict).track_new_mem_stack_16  ||
    106             VG_(tdict).track_die_mem_stack_16  ||
    107             VG_(tdict).track_new_mem_stack_32  ||
    108             VG_(tdict).track_die_mem_stack_32  ||
    109             VG_(tdict).track_new_mem_stack_112 ||
    110             VG_(tdict).track_die_mem_stack_112 ||
    111             VG_(tdict).track_new_mem_stack_128 ||
    112             VG_(tdict).track_die_mem_stack_128 ||
    113             VG_(tdict).track_new_mem_stack_144 ||
    114             VG_(tdict).track_die_mem_stack_144 ||
    115             VG_(tdict).track_new_mem_stack_160 ||
    116             VG_(tdict).track_die_mem_stack_160 ||
    117             VG_(tdict).track_new_mem_stack     ||
    118             VG_(tdict).track_die_mem_stack     );
    119 }
    120 
    121 // - The SP aliases are held in an array which is used as a circular buffer.
    122 //   This misses very few constant updates of SP (ie. < 0.1%) while using a
    123 //   small, constant structure that will also never fill up and cause
    124 //   execution to abort.
    125 // - Unused slots have a .temp value of 'IRTemp_INVALID'.
    126 // - 'next_SP_alias_slot' is the index where the next alias will be stored.
    127 // - If the buffer fills, we circle around and start over-writing
    128 //   non-IRTemp_INVALID values.  This is rare, and the overwriting of a
     129 //   value that would subsequently have been used is even rarer.
    130 // - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
     131 //   The remaining slots are either all IRTemp_INVALID (if we haven't yet
     132 //   circled around) or all hold valid entries (if we have).
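//
// Illustrative sketch (hypothetical temp names, not taken from a real
// trace): after the pass below sees
//      t5 = GET:I64(offset_SP)        ->  add_SP_alias(t5, 0)
//      t6 = Sub64(t5, 0x20:I64)       ->  add_SP_alias(t6, -0x20)
// the buffer holds the pairs (t5, 0) and (t6, -0x20), so a later
// PUT(offset_SP) = t6 is recognised as a known 32-byte allocation and
// handled by track_new_mem_stack_32 rather than the generic helper.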
    133 
    134 typedef
    135    struct {
    136       IRTemp temp;
    137       Long   delta;
    138    }
    139    SP_Alias;
    140 
    141 // With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
    142 // And I've tested with smaller values and the wrap-around case works ok.
    143 #define N_ALIASES    32
    144 static SP_Alias SP_aliases[N_ALIASES];
    145 static Int      next_SP_alias_slot = 0;
    146 
    147 static void clear_SP_aliases(void)
    148 {
    149    Int i;
    150    for (i = 0; i < N_ALIASES; i++) {
    151       SP_aliases[i].temp  = IRTemp_INVALID;
    152       SP_aliases[i].delta = 0;
    153    }
    154    next_SP_alias_slot = 0;
    155 }
    156 
    157 static void add_SP_alias(IRTemp temp, Long delta)
    158 {
    159    vg_assert(temp != IRTemp_INVALID);
    160    SP_aliases[ next_SP_alias_slot ].temp  = temp;
    161    SP_aliases[ next_SP_alias_slot ].delta = delta;
    162    next_SP_alias_slot++;
    163    if (N_ALIASES == next_SP_alias_slot) next_SP_alias_slot = 0;
    164 }
    165 
     166 static Bool get_SP_delta(IRTemp temp, Long* delta)
    167 {
    168    Int i;      // i must be signed!
    169    vg_assert(IRTemp_INVALID != temp);
    170    // Search backwards between current buffer position and the start.
    171    for (i = next_SP_alias_slot-1; i >= 0; i--) {
    172       if (temp == SP_aliases[i].temp) {
    173          *delta = SP_aliases[i].delta;
    174          return True;
    175       }
    176    }
    177    // Search backwards between the end and the current buffer position.
    178    for (i = N_ALIASES-1; i >= next_SP_alias_slot; i--) {
    179       if (temp == SP_aliases[i].temp) {
    180          *delta = SP_aliases[i].delta;
    181          return True;
    182       }
    183    }
    184    return False;
    185 }
    186 
    187 static void update_SP_aliases(Long delta)
    188 {
    189    Int i;
    190    for (i = 0; i < N_ALIASES; i++) {
    191       if (SP_aliases[i].temp == IRTemp_INVALID) {
    192          return;
    193       }
    194       SP_aliases[i].delta += delta;
    195    }
    196 }
    197 
    198 /* Given a guest IP, get an origin tag for a 1-element stack trace,
    199    and wrap it up in an IR atom that can be passed as the origin-tag
    200    value for a stack-adjustment helper function. */
    201 static IRExpr* mk_ecu_Expr ( Addr64 guest_IP )
    202 {
    203    UInt ecu;
    204    ExeContext* ec
    205       = VG_(make_depth_1_ExeContext_from_Addr)( (Addr)guest_IP );
    206    vg_assert(ec);
    207    ecu = VG_(get_ECU_from_ExeContext)( ec );
    208    vg_assert(VG_(is_plausible_ECU)(ecu));
    209    /* This is always safe to do, since ecu is only 32 bits, and
    210       HWord is 32 or 64. */
    211    return mkIRExpr_HWord( (HWord)ecu );
    212 }
    213 
    214 /* When gdbserver is activated, the translation of a block must
    215    first be done by the tool function, then followed by a pass
    216    which (if needed) instruments the code for gdbserver.
    217 */
    218 static
    219 IRSB* tool_instrument_then_gdbserver_if_needed ( VgCallbackClosure* closureV,
    220                                                  IRSB*              sb_in,
    221                                                  VexGuestLayout*    layout,
    222                                                  VexGuestExtents*   vge,
    223                                                  IRType             gWordTy,
    224                                                  IRType             hWordTy )
    225 {
    226    return VG_(instrument_for_gdbserver_if_needed)
    227       (VG_(tdict).tool_instrument (closureV,
    228                                    sb_in,
    229                                    layout,
    230                                    vge,
    231                                    gWordTy,
    232                                    hWordTy),
    233        layout,
    234        vge,
    235        gWordTy,
    236        hWordTy);
    237 }
    238 
    239 /* For tools that want to know about SP changes, this pass adds
    240    in the appropriate hooks.  We have to do it after the tool's
    241    instrumentation, so the tool doesn't have to worry about the C calls
    242    it adds in, and we must do it before register allocation because
    243    spilled temps make it much harder to work out the SP deltas.
     244    This is done with Vex's "second instrumentation" pass.
    245 
    246    Basically, we look for GET(SP)/PUT(SP) pairs and track constant
    247    increments/decrements of SP between them.  (This requires tracking one or
    248    more "aliases", which are not exact aliases but instead are tempregs
    249    whose value is equal to the SP's plus or minus a known constant.)
    250    If all the changes to SP leading up to a PUT(SP) are by known, small
    251    constants, we can do a specific call to eg. new_mem_stack_4, otherwise
    252    we fall back to the case that handles an unknown SP change.
    253 
    254    There is some extra complexity to deal correctly with updates to
    255    only parts of SP.  Bizarre, but it has been known to happen.
    256 */
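
/* Illustrative sketch (hypothetical temp names, not an excerpt from a
   real IR dump): for a guest instruction that subtracts 16 from a
   64-bit stack pointer, the incoming IR is roughly

      t3 = GET:I64(offset_SP)
      t4 = Sub64(t3, 0x10:I64)
      PUT(offset_SP) = t4

   Because t4 is known to equal SP-16, the pass below inserts a dirty
   call to track_new_mem_stack_16(t4) just before copying the PUT.  If
   the delta cannot be determined, the generic VG_(unknown_SP_update)
   helper is passed the old and new SP values instead. */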
    257 static
    258 IRSB* vg_SP_update_pass ( void*             closureV,
    259                           IRSB*             sb_in,
    260                           VexGuestLayout*   layout,
    261                           VexGuestExtents*  vge,
    262                           IRType            gWordTy,
    263                           IRType            hWordTy )
    264 {
    265    Int         i, j, k, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
    266    Int         first_SP, last_SP, first_Put, last_Put;
    267    IRDirty     *dcall, *d;
    268    IRStmt*     st;
    269    IRExpr*     e;
    270    IRRegArray* descr;
    271    IRType      typeof_SP;
    272    Long        delta, con;
    273 
    274    /* Set up stuff for tracking the guest IP */
    275    Bool   curr_IP_known = False;
    276    Addr64 curr_IP       = 0;
    277 
    278    /* Set up BB */
    279    IRSB* bb     = emptyIRSB();
    280    bb->tyenv    = deepCopyIRTypeEnv(sb_in->tyenv);
    281    bb->next     = deepCopyIRExpr(sb_in->next);
    282    bb->jumpkind = sb_in->jumpkind;
    283    bb->offsIP   = sb_in->offsIP;
    284 
    285    delta = 0;
    286 
    287    sizeof_SP = layout->sizeof_SP;
    288    offset_SP = layout->offset_SP;
    289    typeof_SP = sizeof_SP==4 ? Ity_I32 : Ity_I64;
    290    vg_assert(sizeof_SP == 4 || sizeof_SP == 8);
    291 
    292    /* --- Start of #defines --- */
    293 
    294 #  define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
    295 #  define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
    296 
    297 #  define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
    298 
    299 #  define GET_CONST(con)                                                \
    300        (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32)                        \
    301                      : (Long)(con->Ico.U64))
    302 
    303 #  define DO_NEW(syze, tmpp)                                            \
    304       do {                                                              \
    305          Bool vanilla, w_ecu;                                           \
    306          vg_assert(curr_IP_known);                                      \
    307          vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze;       \
    308          w_ecu   = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
    309          vg_assert(!(vanilla && w_ecu)); /* can't have both */          \
    310          if (!(vanilla || w_ecu))                                       \
    311             goto generic;                                               \
    312                                                                         \
    313          /* I don't know if it's really necessary to say that the */    \
    314          /* call reads the stack pointer.  But anyway, we do. */        \
    315          if (w_ecu) {                                                   \
    316             dcall = unsafeIRDirty_0_N(                                  \
    317                        2/*regparms*/,                                   \
    318                        "track_new_mem_stack_" #syze "_w_ECU",           \
    319                        VG_(fnptr_to_fnentry)(                           \
    320                           VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
    321                        mkIRExprVec_2(IRExpr_RdTmp(tmpp),                \
    322                                      mk_ecu_Expr(curr_IP))              \
    323                     );                                                  \
    324          } else {                                                       \
    325             dcall = unsafeIRDirty_0_N(                                  \
    326                        1/*regparms*/,                                   \
    327                        "track_new_mem_stack_" #syze ,                   \
    328                        VG_(fnptr_to_fnentry)(                           \
    329                           VG_(tdict).track_new_mem_stack_##syze ),      \
    330                        mkIRExprVec_1(IRExpr_RdTmp(tmpp))                \
    331                     );                                                  \
    332          }                                                              \
    333          dcall->nFxState = 1;                                           \
    334          dcall->fxState[0].fx     = Ifx_Read;                           \
    335          dcall->fxState[0].offset = layout->offset_SP;                  \
    336          dcall->fxState[0].size   = layout->sizeof_SP;                  \
    337          dcall->fxState[0].nRepeats  = 0;                               \
    338          dcall->fxState[0].repeatLen = 0;                               \
    339                                                                         \
    340          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
    341                                                                         \
    342          tl_assert(syze > 0);                                           \
    343          update_SP_aliases(syze);                                       \
    344                                                                         \
    345          n_SP_updates_fast++;                                           \
    346                                                                         \
    347       } while (0)
    348 
    349 #  define DO_DIE(syze, tmpp)                                            \
    350       do {                                                              \
    351          if (!VG_(tdict).track_die_mem_stack_##syze)                    \
    352             goto generic;                                               \
    353                                                                         \
    354          /* I don't know if it's really necessary to say that the */    \
    355          /* call reads the stack pointer.  But anyway, we do. */        \
    356          dcall = unsafeIRDirty_0_N(                                     \
    357                     1/*regparms*/,                                      \
    358                     "track_die_mem_stack_" #syze,                       \
    359                     VG_(fnptr_to_fnentry)(                              \
    360                        VG_(tdict).track_die_mem_stack_##syze ),         \
    361                     mkIRExprVec_1(IRExpr_RdTmp(tmpp))                   \
    362                  );                                                     \
    363          dcall->nFxState = 1;                                           \
    364          dcall->fxState[0].fx     = Ifx_Read;                           \
    365          dcall->fxState[0].offset = layout->offset_SP;                  \
    366          dcall->fxState[0].size   = layout->sizeof_SP;                  \
    367          dcall->fxState[0].nRepeats  = 0;                               \
    368          dcall->fxState[0].repeatLen = 0;                               \
    369                                                                         \
    370          addStmtToIRSB( bb, IRStmt_Dirty(dcall) );                      \
    371                                                                         \
    372          tl_assert(syze > 0);                                           \
    373          update_SP_aliases(-(syze));                                    \
    374                                                                         \
    375          n_SP_updates_fast++;                                           \
    376                                                                         \
    377       } while (0)
    378 
    379    /* --- End of #defines --- */
    380 
    381    clear_SP_aliases();
    382 
    383    for (i = 0; i <  sb_in->stmts_used; i++) {
    384 
    385       st = sb_in->stmts[i];
    386 
    387       if (st->tag == Ist_IMark) {
    388          curr_IP_known = True;
    389          curr_IP       = st->Ist.IMark.addr;
    390       }
    391 
    392       /* t = Get(sp):   curr = t, delta = 0 */
    393       if (st->tag != Ist_WrTmp) goto case2;
    394       e = st->Ist.WrTmp.data;
    395       if (e->tag != Iex_Get)              goto case2;
    396       if (e->Iex.Get.offset != offset_SP) goto case2;
    397       if (e->Iex.Get.ty != typeof_SP)     goto case2;
    398       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    399       add_SP_alias(st->Ist.WrTmp.tmp, 0);
    400       addStmtToIRSB( bb, st );
    401       continue;
    402 
    403      case2:
    404       /* t' = curr +/- const:   curr = t',  delta +=/-= const */
    405       if (st->tag != Ist_WrTmp) goto case3;
    406       e = st->Ist.WrTmp.data;
    407       if (e->tag != Iex_Binop) goto case3;
    408       if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
    409       if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
    410       if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
    411       if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
    412       con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
    413       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    414       if (IS_ADD(e->Iex.Binop.op)) {
    415          add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
    416       } else {
    417          add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
    418       }
    419       addStmtToIRSB( bb, st );
    420       continue;
    421 
    422      case3:
    423       /* t' = curr:   curr = t' */
    424       if (st->tag != Ist_WrTmp) goto case4;
    425       e = st->Ist.WrTmp.data;
    426       if (e->tag != Iex_RdTmp) goto case4;
    427       if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
    428       vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
    429       add_SP_alias(st->Ist.WrTmp.tmp, delta);
    430       addStmtToIRSB( bb, st );
    431       continue;
    432 
    433      case4:
    434       /* Put(sp) = curr */
    435       /* More generally, we must correctly handle a Put which writes
    436          any part of SP, not just the case where all of SP is
    437          written. */
    438       if (st->tag != Ist_Put) goto case5;
    439       first_SP  = offset_SP;
    440       last_SP   = first_SP + sizeof_SP - 1;
    441       first_Put = st->Ist.Put.offset;
    442       last_Put  = first_Put
    443                   + sizeofIRType( typeOfIRExpr( bb->tyenv, st->Ist.Put.data ))
    444                   - 1;
    445       vg_assert(first_SP <= last_SP);
    446       vg_assert(first_Put <= last_Put);
    447 
    448       if (last_Put < first_SP || last_SP < first_Put)
    449          goto case5; /* no overlap */
    450 
    451       if (st->Ist.Put.data->tag == Iex_RdTmp
    452           && get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
    453          IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
    454          /* Why should the following assertion hold?  Because any
     455             alias added by add_SP_alias must be of a temporary which
    456             has the same type as typeof_SP, and whose value is a Get
    457             at exactly offset_SP of size typeof_SP.  Each call to
     458             add_SP_alias is immediately preceded by an assertion that
    459             we are putting in a binding for a correctly-typed
    460             temporary. */
    461          vg_assert( typeOfIRTemp(bb->tyenv, tttmp) == typeof_SP );
     462          /* From the same type-and-offset-correctness argument, if
     463             we found a usable alias, it must be for an "exact" write of SP. */
    464          vg_assert(first_SP == first_Put);
    465          vg_assert(last_SP == last_Put);
    466          switch (delta) {
    467             case    0:                      addStmtToIRSB(bb,st); continue;
    468             case    4: DO_DIE(  4,  tttmp); addStmtToIRSB(bb,st); continue;
    469             case   -4: DO_NEW(  4,  tttmp); addStmtToIRSB(bb,st); continue;
    470             case    8: DO_DIE(  8,  tttmp); addStmtToIRSB(bb,st); continue;
    471             case   -8: DO_NEW(  8,  tttmp); addStmtToIRSB(bb,st); continue;
    472             case   12: DO_DIE(  12, tttmp); addStmtToIRSB(bb,st); continue;
    473             case  -12: DO_NEW(  12, tttmp); addStmtToIRSB(bb,st); continue;
    474             case   16: DO_DIE(  16, tttmp); addStmtToIRSB(bb,st); continue;
    475             case  -16: DO_NEW(  16, tttmp); addStmtToIRSB(bb,st); continue;
    476             case   32: DO_DIE(  32, tttmp); addStmtToIRSB(bb,st); continue;
    477             case  -32: DO_NEW(  32, tttmp); addStmtToIRSB(bb,st); continue;
    478             case  112: DO_DIE( 112, tttmp); addStmtToIRSB(bb,st); continue;
    479             case -112: DO_NEW( 112, tttmp); addStmtToIRSB(bb,st); continue;
    480             case  128: DO_DIE( 128, tttmp); addStmtToIRSB(bb,st); continue;
    481             case -128: DO_NEW( 128, tttmp); addStmtToIRSB(bb,st); continue;
    482             case  144: DO_DIE( 144, tttmp); addStmtToIRSB(bb,st); continue;
    483             case -144: DO_NEW( 144, tttmp); addStmtToIRSB(bb,st); continue;
    484             case  160: DO_DIE( 160, tttmp); addStmtToIRSB(bb,st); continue;
    485             case -160: DO_NEW( 160, tttmp); addStmtToIRSB(bb,st); continue;
    486             default:
    487                /* common values for ppc64: 144 128 160 112 176 */
    488                n_SP_updates_generic_known++;
    489                goto generic;
    490          }
    491       } else {
    492          /* Deal with an unknown update to SP.  We're here because
    493             either:
    494             (1) the Put does not exactly cover SP; it is a partial update.
    495                 Highly unlikely, but has been known to happen for 16-bit
    496                 Windows apps running on Wine, doing 16-bit adjustments to
    497                 %sp.
    498             (2) the Put does exactly cover SP, but we are unable to
    499                 determine how the value relates to the old SP.  In any
    500                 case, we cannot assume that the Put.data value is a tmp;
    501                 we must assume it can be anything allowed in flat IR (tmp
    502                 or const).
    503          */
    504          IRTemp  old_SP;
    505          n_SP_updates_generic_unknown++;
    506 
    507          // Nb: if all is well, this generic case will typically be
    508          // called something like every 1000th SP update.  If it's more than
    509          // that, the above code may be missing some cases.
    510         generic:
    511          /* Pass both the old and new SP values to this helper.  Also,
    512             pass an origin tag, even if it isn't needed. */
    513          old_SP = newIRTemp(bb->tyenv, typeof_SP);
    514          addStmtToIRSB(
    515             bb,
    516             IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
    517          );
    518 
    519          /* Now we know what the old value of SP is.  But knowing the new
    520             value is a bit tricky if there is a partial write. */
    521          if (first_Put == first_SP && last_Put == last_SP) {
    522            /* The common case, an exact write to SP.  So st->Ist.Put.data
    523               does hold the new value; simple. */
    524             vg_assert(curr_IP_known);
    525             dcall = unsafeIRDirty_0_N(
    526                        3/*regparms*/,
    527                        "VG_(unknown_SP_update)",
    528                        VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
    529                        mkIRExprVec_3( IRExpr_RdTmp(old_SP), st->Ist.Put.data,
    530                                       mk_ecu_Expr(curr_IP) )
    531                     );
    532             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
    533             /* don't forget the original assignment */
    534             addStmtToIRSB( bb, st );
    535          } else {
    536             /* We have a partial update to SP.  We need to know what
    537                the new SP will be, and hand that to the helper call,
    538                but when the helper call happens, SP must hold the
    539                value it had before the update.  Tricky.
    540                Therefore use the following kludge:
    541                1. do the partial SP update (Put)
    542                2. Get the new SP value into a tmp, new_SP
    543                3. Put old_SP
    544                4. Call the helper
    545                5. Put new_SP
    546             */
    547             IRTemp new_SP;
    548             /* 1 */
    549             addStmtToIRSB( bb, st );
    550             /* 2 */
    551             new_SP = newIRTemp(bb->tyenv, typeof_SP);
    552             addStmtToIRSB(
    553                bb,
    554                IRStmt_WrTmp( new_SP, IRExpr_Get(offset_SP, typeof_SP) )
    555             );
    556             /* 3 */
    557             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(old_SP) ));
    558             /* 4 */
    559             vg_assert(curr_IP_known);
    560             dcall = unsafeIRDirty_0_N(
    561                        3/*regparms*/,
    562                        "VG_(unknown_SP_update)",
    563                        VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
    564                        mkIRExprVec_3( IRExpr_RdTmp(old_SP),
    565                                       IRExpr_RdTmp(new_SP),
    566                                       mk_ecu_Expr(curr_IP) )
    567                     );
    568             addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
    569             /* 5 */
    570             addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(new_SP) ));
    571          }
    572 
    573          /* Forget what we already know. */
    574          clear_SP_aliases();
    575 
    576          /* If this is a Put of a tmp that exactly updates SP,
    577             start tracking aliases against this tmp. */
    578 
    579          if (first_Put == first_SP && last_Put == last_SP
    580              && st->Ist.Put.data->tag == Iex_RdTmp) {
    581             vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.Put.data->Iex.RdTmp.tmp)
    582                        == typeof_SP );
    583             add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
    584          }
    585          continue;
    586       }
    587 
    588      case5:
    589       /* PutI or Dirty call which overlaps SP: complain.  We can't
    590          deal with SP changing in weird ways (well, we can, but not at
    591          this time of night).  */
    592       if (st->tag == Ist_PutI) {
    593          descr = st->Ist.PutI.details->descr;
    594          minoff_ST = descr->base;
    595          maxoff_ST = descr->base
    596                      + descr->nElems * sizeofIRType(descr->elemTy) - 1;
    597          if (!(offset_SP > maxoff_ST
    598                || (offset_SP + sizeof_SP - 1) < minoff_ST))
    599             goto complain;
    600       }
    601       if (st->tag == Ist_Dirty) {
    602          d = st->Ist.Dirty.details;
    603          for (j = 0; j < d->nFxState; j++) {
    604             if (d->fxState[j].fx == Ifx_Read || d->fxState[j].fx == Ifx_None)
    605                continue;
    606             /* Enumerate the described state segments */
    607             for (k = 0; k < 1 + d->fxState[j].nRepeats; k++) {
    608                minoff_ST = d->fxState[j].offset + k * d->fxState[j].repeatLen;
    609                maxoff_ST = minoff_ST + d->fxState[j].size - 1;
    610                if (!(offset_SP > maxoff_ST
    611                      || (offset_SP + sizeof_SP - 1) < minoff_ST))
    612                   goto complain;
    613             }
    614          }
    615       }
    616 
    617       /* well, not interesting.  Just copy and keep going. */
    618       addStmtToIRSB( bb, st );
    619 
    620    } /* for (i = 0; i < sb_in->stmts_used; i++) */
    621 
    622    return bb;
    623 
    624   complain:
    625    VG_(core_panic)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
    626 
    627 #undef IS_ADD
    628 #undef IS_SUB
    629 #undef IS_ADD_OR_SUB
    630 #undef GET_CONST
    631 #undef DO_NEW
    632 #undef DO_DIE
    633 }
    634 
    635 /*------------------------------------------------------------*/
    636 /*--- Main entry point for the JITter.                     ---*/
    637 /*------------------------------------------------------------*/
    638 
    639 /* Extra comments re self-checking translations and self-modifying
    640    code.  (JRS 14 Oct 05).
    641 
    642    There are 3 modes:
    643    (1) no checking: all code assumed to be not self-modifying
    644    (2) partial: known-problematic situations get a self-check
    645    (3) full checking: all translations get a self-check
    646 
    647    As currently implemented, the default is (2).  (3) is always safe,
    648    but very slow.  (1) works mostly, but fails for gcc nested-function
    649    code which uses trampolines on the stack; this situation is
    650    detected and handled by (2).
    651 
    652    ----------
    653 
    654    A more robust and transparent solution, which is not currently
    655    implemented, is a variant of (2): if a translation is made from an
    656    area which aspacem says does not have 'w' permission, then it can
    657    be non-self-checking.  Otherwise, it needs a self-check.
    658 
    659    This is complicated by Vex's basic-block chasing.  If a self-check
    660    is requested, then Vex will not chase over basic block boundaries
    661    (it's too complex).  However there is still a problem if it chases
    662    from a non-'w' area into a 'w' area.
    663 
    664    I think the right thing to do is:
    665 
    666    - if a translation request starts in a 'w' area, ask for a
    667      self-checking translation, and do not allow any chasing (make
    668      chase_into_ok return False).  Note that the latter is redundant
    669      in the sense that Vex won't chase anyway in this situation.
    670 
    671    - if a translation request starts in a non-'w' area, do not ask for
    672      a self-checking translation.  However, do not allow chasing (as
    673      determined by chase_into_ok) to go into a 'w' area.
    674 
    675    The result of this is that all code inside 'w' areas is self
    676    checking.
    677 
    678    To complete the trick, there is a caveat: we must watch the
    679    client's mprotect calls.  If pages are changed from non-'w' to 'w'
    680    then we should throw away all translations which intersect the
    681    affected area, so as to force them to be redone with self-checks.
    682 
    683    ----------
    684 
    685    The above outlines the conditions under which bb chasing is allowed
    686    from a self-modifying-code point of view.  There are other
    687    situations pertaining to function redirection in which it is
    688    necessary to disallow chasing, but those fall outside the scope of
    689    this comment.
    690 */
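
/* (At run time the choice among these modes is made via
   VG_(clo_smc_check); see needs_self_check() below, which acts on the
   Vg_SmcNone / Vg_SmcStack / Vg_SmcAll / Vg_SmcAllNonFile settings per
   extent.) */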
    691 
    692 
    693 /* Vex dumps the final code in here.  Then we can copy it off
    694    wherever we like. */
    695 /* 60000: should agree with assertion in VG_(add_to_transtab) in
    696    m_transtab.c. */
    697 #define N_TMPBUF 60000
    698 static UChar tmpbuf[N_TMPBUF];
    699 
    700 
    701 /* Function pointers we must supply to LibVEX in order that it
    702    can bomb out and emit messages under Valgrind's control. */
    703 __attribute__ ((noreturn))
    704 static
    705 void failure_exit ( void )
    706 {
    707    LibVEX_ShowAllocStats();
    708    VG_(core_panic)("LibVEX called failure_exit().");
    709 }
    710 
    711 static
    712 void log_bytes ( HChar* bytes, Int nbytes )
    713 {
    714   Int i;
    715   for (i = 0; i < nbytes-3; i += 4)
    716      VG_(printf)("%c%c%c%c", bytes[i], bytes[i+1], bytes[i+2], bytes[i+3]);
    717   for (; i < nbytes; i++)
    718      VG_(printf)("%c", bytes[i]);
    719 }
    720 
    721 
    722 /* --------- Various helper functions for translation --------- */
    723 
    724 /* Look for reasons to disallow making translations from the given
    725    segment. */
    726 
    727 static Bool translations_allowable_from_seg ( NSegment const* seg )
    728 {
    729 #  if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32)
    730    Bool allowR = True;
    731 #  else
    732    Bool allowR = False;
    733 #  endif
    734    return seg != NULL
    735           && (seg->kind == SkAnonC || seg->kind == SkFileC || seg->kind == SkShmC)
    736           && (seg->hasX || (seg->hasR && allowR));
    737 }
    738 
    739 
    740 /* Produce a bitmask stating which of the supplied extents needs a
    741    self-check.  See documentation of
    742    VexTranslateArgs::needs_self_check for more details about the
    743    return convention. */
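
/* For example (a sketch of the convention as used below): with three
   extents in 'vge', of which extents 0 and 2 need a check but extent 1
   does not, the returned bitset is (1 << 0) | (1 << 2) == 5. */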
    744 
    745 static UInt needs_self_check ( void* closureV,
    746                                VexGuestExtents* vge )
    747 {
    748    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
    749    UInt i, bitset;
    750 
    751    vg_assert(vge->n_used >= 1 && vge->n_used <= 3);
    752    bitset = 0;
    753 
    754    for (i = 0; i < vge->n_used; i++) {
    755       Bool  check = False;
    756       Addr  addr  = (Addr)vge->base[i];
    757       SizeT len   = (SizeT)vge->len[i];
    758       NSegment const* segA = NULL;
    759 
    760 #     if defined(VGO_darwin)
    761       // GrP fixme hack - dyld i386 IMPORT gets rewritten.
    762       // To really do this correctly, we'd need to flush the
    763       // translation cache whenever a segment became +WX.
    764       segA = VG_(am_find_nsegment)(addr);
    765       if (segA && segA->hasX && segA->hasW)
    766          check = True;
    767 #     endif
    768 
    769       if (!check) {
    770          switch (VG_(clo_smc_check)) {
    771             case Vg_SmcNone:
    772                /* never check (except as per Darwin hack above) */
    773                break;
    774             case Vg_SmcAll:
    775                /* always check */
    776                check = True;
    777                break;
    778             case Vg_SmcStack: {
    779                /* check if the address is in the same segment as this
    780                   thread's stack pointer */
    781                Addr sp = VG_(get_SP)(closure->tid);
    782                if (!segA) {
    783                   segA = VG_(am_find_nsegment)(addr);
    784                }
    785                NSegment const* segSP = VG_(am_find_nsegment)(sp);
    786                if (segA && segSP && segA == segSP)
    787                   check = True;
    788                break;
    789             }
    790             case Vg_SmcAllNonFile: {
    791                /* check if any part of the extent is not in a
    792                   file-mapped segment */
    793                if (!segA) {
    794                   segA = VG_(am_find_nsegment)(addr);
    795                }
    796                if (segA && segA->kind == SkFileC && segA->start <= addr
    797                    && (len == 0 || addr + len <= segA->end + 1)) {
    798                   /* in a file-mapped segment; skip the check */
    799                } else {
    800                   check = True;
    801                }
    802                break;
    803             }
    804             default:
    805                vg_assert(0);
    806          }
    807       }
    808 
    809       if (check)
    810          bitset |= (1 << i);
    811    }
    812 
    813    return bitset;
    814 }
    815 
    816 
    817 /* This is a callback passed to LibVEX_Translate.  It stops Vex from
    818    chasing into function entry points that we wish to redirect.
    819    Chasing across them obviously defeats the redirect mechanism, with
    820    bad effects for Memcheck, Helgrind, DRD, Massif, and possibly others.
    821 */
    822 static Bool chase_into_ok ( void* closureV, Addr64 addr64 )
    823 {
    824    Addr               addr    = (Addr)addr64;
    825    NSegment const*    seg     = VG_(am_find_nsegment)(addr);
    826 
    827    /* Work through a list of possibilities why we might not want to
    828       allow a chase. */
    829 
    830    /* Destination not in a plausible segment? */
    831    if (!translations_allowable_from_seg(seg))
    832       goto dontchase;
    833 
    834    /* Destination is redirected? */
    835    if (addr != VG_(redir_do_lookup)(addr, NULL))
    836       goto dontchase;
    837 
    838 #  if defined(VG_PLAT_USES_PPCTOC)
    839    /* This needs to be at the start of its own block.  Don't chase. Re
    840       ULong_to_Ptr, be careful to ensure we only compare 32 bits on a
    841       32-bit target.*/
    842    if (ULong_to_Ptr(addr64)
    843        == (void*)&VG_(ppctoc_magic_redirect_return_stub))
    844       goto dontchase;
    845 #  endif
    846 
    847    /* overly conservative, but .. don't chase into the distinguished
    848       address that m_transtab uses as an empty-slot marker for
    849       VG_(tt_fast). */
    850    if (addr == TRANSTAB_BOGUS_GUEST_ADDR)
    851       goto dontchase;
    852 
    853 #  if defined(VGA_s390x)
    854    /* Never chase into an EX instruction. Generating IR for EX causes
    855       a round-trip through the scheduler including VG_(discard_translations).
    856       And that's expensive as shown by perf/tinycc.c:
    857       Chasing into EX increases the number of EX translations from 21 to
    858       102666 causing a 7x runtime increase for "none" and a 3.2x runtime
    859       increase for memcheck. */
    860    if (((UChar *)ULong_to_Ptr(addr))[0] == 0x44 ||   /* EX */
    861        ((UChar *)ULong_to_Ptr(addr))[0] == 0xC6)     /* EXRL */
    862      goto dontchase;
    863 #  endif
    864 
    865    /* well, ok then.  go on and chase. */
    866    return True;
    867 
    868    vg_assert(0);
    869    /*NOTREACHED*/
    870 
    871   dontchase:
    872    if (0) VG_(printf)("not chasing into 0x%lx\n", addr);
    873    return False;
    874 }
    875 
    876 
    877 /* --------------- helpers for with-TOC platforms --------------- */
    878 
    879 /* NOTE: with-TOC platforms are: ppc64-linux. */
    880 
    881 static IRExpr* mkU64 ( ULong n ) {
    882    return IRExpr_Const(IRConst_U64(n));
    883 }
    884 static IRExpr* mkU32 ( UInt n ) {
    885    return IRExpr_Const(IRConst_U32(n));
    886 }
    887 
    888 #if defined(VG_PLAT_USES_PPCTOC)
    889 static IRExpr* mkU8 ( UChar n ) {
    890    return IRExpr_Const(IRConst_U8(n));
    891 }
    892 static IRExpr* narrowTo32 ( IRTypeEnv* tyenv, IRExpr* e ) {
    893    if (typeOfIRExpr(tyenv, e) == Ity_I32) {
    894       return e;
    895    } else {
    896       vg_assert(typeOfIRExpr(tyenv, e) == Ity_I64);
    897       return IRExpr_Unop(Iop_64to32, e);
    898    }
    899 }
    900 
    901 /* Generate code to push word-typed expression 'e' onto this thread's
    902    redir stack, checking for stack overflow and generating code to
    903    bomb out if so. */
    904 
    905 static void gen_PUSH ( IRSB* bb, IRExpr* e )
    906 {
    907    IRRegArray* descr;
    908    IRTemp      t1;
    909    IRExpr*     one;
    910 
    911 #  if defined(VGP_ppc64_linux)
    912    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
    913    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
    914    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
    915    Int    offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
    916    Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
    917    Bool   is64             = True;
    918    IRType ty_Word          = Ity_I64;
    919    IROp   op_CmpNE         = Iop_CmpNE64;
    920    IROp   op_Sar           = Iop_Sar64;
    921    IROp   op_Sub           = Iop_Sub64;
    922    IROp   op_Add           = Iop_Add64;
    923    IRExpr*(*mkU)(ULong)    = mkU64;
    924    vg_assert(VG_WORDSIZE == 8);
    925 #  else
    926    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
    927    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
    928    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
    929    Int    offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
    930    Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
    931    Bool   is64             = False;
    932    IRType ty_Word          = Ity_I32;
    933    IROp   op_CmpNE         = Iop_CmpNE32;
    934    IROp   op_Sar           = Iop_Sar32;
    935    IROp   op_Sub           = Iop_Sub32;
    936    IROp   op_Add           = Iop_Add32;
    937    IRExpr*(*mkU)(UInt)     = mkU32;
    938    vg_assert(VG_WORDSIZE == 4);
    939 #  endif
    940 
    941    vg_assert(sizeof(void*) == VG_WORDSIZE);
    942    vg_assert(sizeof(Word)  == VG_WORDSIZE);
    943    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
    944 
    945    descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
    946    t1    = newIRTemp( bb->tyenv, ty_Word );
    947    one   = mkU(1);
    948 
    949    vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);
    950 
    951    /* t1 = guest_REDIR_SP + 1 */
    952    addStmtToIRSB(
    953       bb,
    954       IRStmt_WrTmp(
    955          t1,
    956          IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
    957       )
    958    );
    959 
    960    /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
    961       The destination (0) is a bit bogus but it doesn't matter since
    962       this is an unrecoverable error and will lead to Valgrind
    963       shutting down.  _EMWARN is set regardless - that's harmless
     964       since it only has a meaning if the exit is taken. */
    965    addStmtToIRSB(
    966       bb,
    967       IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_overflow))
    968    );
    969    addStmtToIRSB(
    970       bb,
    971       IRStmt_Exit(
    972          IRExpr_Binop(
    973             op_CmpNE,
    974             IRExpr_Binop(
    975                op_Sar,
    976                IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
    977                mkU8(8 * VG_WORDSIZE - 1)
    978             ),
    979             mkU(0)
    980          ),
    981          Ijk_EmFail,
    982          is64 ? IRConst_U64(0) : IRConst_U32(0),
    983          offB_CIA
    984       )
    985    );
    986 
    987    /* guest_REDIR_SP = t1 */
    988    addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));
    989 
    990    /* guest_REDIR_STACK[t1+0] = e */
    991    /* PutI/GetI have I32-typed indexes regardless of guest word size */
    992    addStmtToIRSB(
    993       bb,
    994       IRStmt_PutI(mkIRPutI(descr,
    995                            narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)));
    996 }
    997 
    998 
    999 /* Generate code to pop a word-sized value from this thread's redir
   1000    stack, binding it to a new temporary, which is returned.  As with
   1001    gen_PUSH, an overflow check is also performed. */
   1002 
   1003 static IRTemp gen_POP ( IRSB* bb )
   1004 {
   1005 #  if defined(VGP_ppc64_linux)
   1006    Int    stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
   1007    Int    offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
   1008    Int    offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
   1009    Int    offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
   1010    Int    offB_CIA         = offsetof(VexGuestPPC64State,guest_CIA);
   1011    Bool   is64             = True;
   1012    IRType ty_Word          = Ity_I64;
   1013    IROp   op_CmpNE         = Iop_CmpNE64;
   1014    IROp   op_Sar           = Iop_Sar64;
   1015    IROp   op_Sub           = Iop_Sub64;
   1016    IRExpr*(*mkU)(ULong)    = mkU64;
   1017 #  else
   1018    Int    stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
   1019    Int    offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
   1020    Int    offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
   1021    Int    offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
   1022    Int    offB_CIA         = offsetof(VexGuestPPC32State,guest_CIA);
   1023    Bool   is64             = False;
   1024    IRType ty_Word          = Ity_I32;
   1025    IROp   op_CmpNE         = Iop_CmpNE32;
   1026    IROp   op_Sar           = Iop_Sar32;
   1027    IROp   op_Sub           = Iop_Sub32;
   1028    IRExpr*(*mkU)(UInt)     = mkU32;
   1029 #  endif
   1030 
   1031    IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
   1032    IRTemp      t1    = newIRTemp( bb->tyenv, ty_Word );
   1033    IRTemp      res   = newIRTemp( bb->tyenv, ty_Word );
   1034    IRExpr*     one   = mkU(1);
   1035 
   1036    vg_assert(sizeof(void*) == VG_WORDSIZE);
   1037    vg_assert(sizeof(Word)  == VG_WORDSIZE);
   1038    vg_assert(sizeof(Addr)  == VG_WORDSIZE);
   1039 
   1040    /* t1 = guest_REDIR_SP */
   1041    addStmtToIRSB(
   1042       bb,
   1043       IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
   1044    );
   1045 
   1046    /* Bomb out if t1 < 0.  Same comments as gen_PUSH apply. */
   1047    addStmtToIRSB(
   1048       bb,
   1049       IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_underflow))
   1050    );
   1051    addStmtToIRSB(
   1052       bb,
   1053       IRStmt_Exit(
   1054          IRExpr_Binop(
   1055             op_CmpNE,
   1056             IRExpr_Binop(
   1057                op_Sar,
   1058                IRExpr_RdTmp(t1),
   1059                mkU8(8 * VG_WORDSIZE - 1)
   1060             ),
   1061             mkU(0)
   1062          ),
   1063          Ijk_EmFail,
   1064          is64 ? IRConst_U64(0) : IRConst_U32(0),
   1065          offB_CIA
   1066       )
   1067    );
   1068 
   1069    /* res = guest_REDIR_STACK[t1+0] */
   1070    /* PutI/GetI have I32-typed indexes regardless of guest word size */
   1071    addStmtToIRSB(
   1072       bb,
   1073       IRStmt_WrTmp(
   1074          res,
   1075          IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
   1076       )
   1077    );
   1078 
   1079    /* guest_REDIR_SP = t1-1 */
   1080    addStmtToIRSB(
   1081       bb,
   1082       IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
   1083    );
   1084 
   1085    return res;
   1086 }
   1087 
   1088 /* Generate code to push LR and R2 onto this thread's redir stack,
   1089    then set R2 to the new value (which is the TOC pointer to be used
   1090    for the duration of the replacement function, as determined by
   1091    m_debuginfo), and set LR to the magic return stub, so we get to
    1092    intercept the return and restore R2 and LR to the values saved
   1093    here. */
   1094 
   1095 static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr64 new_R2_value )
   1096 {
   1097 #  if defined(VGP_ppc64_linux)
   1098    Addr64 bogus_RA  = (Addr64)&VG_(ppctoc_magic_redirect_return_stub);
   1099    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   1100    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   1101    gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
   1102    gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
   1103    addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU64( bogus_RA )) );
   1104    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
   1105 
   1106 #  else
   1107 #    error Platform is not TOC-afflicted, fortunately
   1108 #  endif
   1109 }
   1110 
   1111 static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
   1112 {
   1113 #  if defined(VGP_ppc64_linux)
   1114    Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
   1115    Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
   1116    Int    offB_CIA  = offsetof(VexGuestPPC64State,guest_CIA);
   1117    IRTemp old_R2    = newIRTemp( bb->tyenv, Ity_I64 );
   1118    IRTemp old_LR    = newIRTemp( bb->tyenv, Ity_I64 );
   1119    /* Restore R2 */
   1120    old_R2 = gen_POP( bb );
   1121    addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
   1122    /* Restore LR */
   1123    old_LR = gen_POP( bb );
   1124    addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
   1125    /* Branch to LR */
   1126    /* re boring, we arrived here precisely because a wrapped fn did a
   1127       blr (hence Ijk_Ret); so we should just mark this jump as Boring,
   1128       else one _Call will have resulted in two _Rets. */
   1129    bb->jumpkind = Ijk_Boring;
   1130    bb->next     = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
   1131    bb->offsIP   = offB_CIA;
   1132 #  else
   1133 #    error Platform is not TOC-afflicted, fortunately
   1134 #  endif
   1135 }
   1136 
   1137 static
   1138 Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
   1139 {
   1140    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1141    /* Since we're creating the entire IRSB right here, give it a
   1142       proper IMark, as it won't get one any other way, and cachegrind
   1143       will barf if it doesn't have one (fair enough really). */
   1144    addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4, 0 ) );
   1145    /* Generate the magic sequence:
   1146          pop R2 from hidden stack
   1147          pop LR from hidden stack
   1148          goto LR
   1149    */
   1150    gen_pop_R2_LR_then_bLR(bb);
   1151    return True; /* True == this is the entire BB; don't disassemble any
   1152                    real insns into it - just hand it directly to
   1153                    optimiser/instrumenter/backend. */
   1154 }
   1155 #endif
   1156 
   1157 /* --------------- END helpers for with-TOC platforms --------------- */
   1158 
   1159 
   1160 /* This is the IR preamble generator used for replacement
   1161    functions.  It adds code to set the guest_NRADDR{_GPR2} to zero
   1162    (technically not necessary, but facilitates detecting mixups in
   1163    which a replacement function has been erroneously declared using
   1164    VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
   1165    using VG_WRAP_FUNCTION_Z{U,Z}).
   1166 
    1167    On with-TOC platforms the following hacks are also done: LR and R2 are
   1168    pushed onto a hidden stack, R2 is set to the correct value for the
   1169    replacement function, and LR is set to point at the magic
   1170    return-stub address.  Setting LR causes the return of the
   1171    wrapped/redirected function to lead to our magic return stub, which
   1172    restores LR and R2 from said stack and returns for real.
   1173 
   1174    VG_(get_StackTrace_wrk) understands that the LR value may point to
   1175    the return stub address, and that in that case it can get the real
   1176    LR value from the hidden stack instead. */
   1177 static
   1178 Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
   1179 {
   1180    Int nraddr_szB
   1181       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   1182    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   1183    vg_assert(nraddr_szB == VG_WORDSIZE);
   1184    addStmtToIRSB(
   1185       bb,
   1186       IRStmt_Put(
   1187          offsetof(VexGuestArchState,guest_NRADDR),
   1188          nraddr_szB == 8 ? mkU64(0) : mkU32(0)
   1189       )
   1190    );
   1191 #  if defined(VGP_mips32_linux)
   1192    // t9 needs to be set to point to the start of the redirected function.
   1193    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1194    Int    offB_GPR25 = offsetof(VexGuestMIPS32State,guest_r25);
   1195    addStmtToIRSB( bb, IRStmt_Put( offB_GPR25, mkU32( closure->readdr )) );
   1196 #  endif
   1197 #  if defined(VG_PLAT_USES_PPCTOC)
   1198    { VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1199      addStmtToIRSB(
   1200         bb,
   1201         IRStmt_Put(
   1202            offsetof(VexGuestArchState,guest_NRADDR_GPR2),
   1203            VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
   1204         )
   1205      );
   1206      gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
   1207    }
   1208 #  endif
   1209    return False;
   1210 }
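        /* For orientation, a replacement function of the kind this preamble
           serves is declared on the preload-library side roughly as follows
           (a sketch in the style of Valgrind's str/mem replacements; names
           and details are illustrative, not copied verbatim):

              SizeT VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME,strlen)
                                          ( const char* str )
              {
                 SizeT i = 0;
                 while (str[i] != 0) i++;   // never calls the original
                 return i;
              }

           Since a replacement never calls the function it displaces, it has
           no use for guest_NRADDR; zeroing it here is therefore harmless and
           helps catch REPLACE vs WRAP mixups, as described above. */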
   1211 
   1212 /* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
   1213    address).  This is needed for function wrapping - so the wrapper
   1214    can read _NRADDR and find the address of the function being
   1215    wrapped.  On toc-afflicted platforms we must also snarf r2. */
   1216 static
   1217 Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
   1218 {
   1219    VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
   1220    Int nraddr_szB
   1221       = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
   1222    vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
   1223    vg_assert(nraddr_szB == VG_WORDSIZE);
   1224    addStmtToIRSB(
   1225       bb,
   1226       IRStmt_Put(
   1227          offsetof(VexGuestArchState,guest_NRADDR),
   1228          nraddr_szB == 8
   1229             ? IRExpr_Const(IRConst_U64( closure->nraddr ))
   1230             : IRExpr_Const(IRConst_U32( (UInt)closure->nraddr ))
   1231       )
   1232    );
   1233 #  if defined(VGP_mips32_linux)
   1234    // t9 needs to be set to point to the start of the redirected function.
   1235    Int    offB_GPR25 = offsetof(VexGuestMIPS32State,guest_r25);
   1236    addStmtToIRSB( bb, IRStmt_Put( offB_GPR25, mkU32( closure->readdr )) );
   1237 #  endif
   1238 #  if defined(VGP_ppc64_linux)
   1239    addStmtToIRSB(
   1240       bb,
   1241       IRStmt_Put(
   1242          offsetof(VexGuestArchState,guest_NRADDR_GPR2),
   1243          IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
   1244                     VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
   1245       )
   1246    );
   1247    gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
   1248 #  endif
   1249    return False;
   1250 }
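        /* For orientation, a wrapper of the kind this preamble serves is
           written on the client side roughly as follows (a sketch of the
           standard valgrind.h wrapping idiom; 'foo' and its arguments are
           illustrative):

              #include "valgrind.h"

              int I_WRAP_SONAME_FNNAME_ZU(NONE,foo) ( int x, int y )
              {
                 int    r;
                 OrigFn fn;
                 VALGRIND_GET_ORIG_FN(fn);    // consumes guest_NRADDR
                 CALL_FN_W_WW(r, fn, x, y);   // call the wrapped function
                 return r;
              }

           VALGRIND_GET_ORIG_FN is what reads back the guest_NRADDR (and, on
           TOC platforms, guest_NRADDR_GPR2) values written by this preamble. */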
   1251 
   1252 /* --- Helpers to do with PPC related stack redzones. --- */
   1253 
   1254 __attribute__((unused))
   1255 static Bool const_True ( Addr64 guest_addr )
   1256 {
   1257    return True;
   1258 }
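        /* const_True is handed to Vex below (ppc64-linux only) as the
           guest_ppc_zap_RZ_at_bl callback: returning True for every guest
           address tells Vex the stack redzone may be treated as dead at
           any bl site, whatever the call target. */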
   1259 
   1260 /* --------------- main translation function --------------- */
   1261 
   1262 /* Note: see comments at top of m_redir.c for the Big Picture on how
   1263    redirections are managed. */
   1264 
   1265 typedef
   1266    enum {
   1267       /* normal translation, redir neither requested nor inhibited */
   1268       T_Normal,
   1269       /* redir translation, function-wrap (set _NRADDR) style */
   1270       T_Redir_Wrap,
   1271       /* redir translation, replacement (don't set _NRADDR) style */
   1272       T_Redir_Replace,
   1273       /* a translation in which redir is specifically disallowed */
   1274       T_NoRedir
   1275    }
   1276    T_Kind;
   1277 
   1278 /* Translate the basic block beginning at NRADDR, and add it to the
   1279    translation cache & translation table.  Unless
   1280    DEBUGGING_TRANSLATION is true, in which case the call is being done
   1281    for debugging purposes, so (a) throw away the translation once it
   1282    is made, and (b) produce a load of debugging output.  If
   1283    ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
   1284    and also, put the resulting translation into the no-redirect tt/tc
   1285    instead of the normal one.
   1286 
   1287    TID is the identity of the thread requesting this translation.
   1288 */
   1289 
   1290 Bool VG_(translate) ( ThreadId tid,
   1291                       Addr64   nraddr,
   1292                       Bool     debugging_translation,
   1293                       Int      debugging_verbosity,
   1294                       ULong    bbs_done,
   1295                       Bool     allow_redirection )
   1296 {
   1297    Addr64             addr;
   1298    T_Kind             kind;
   1299    Int                tmpbuf_used, verbosity, i;
   1300    Bool (*preamble_fn)(void*,IRSB*);
   1301    VexArch            vex_arch;
   1302    VexArchInfo        vex_archinfo;
   1303    VexAbiInfo         vex_abiinfo;
   1304    VexGuestExtents    vge;
   1305    VexTranslateArgs   vta;
   1306    VexTranslateResult tres;
   1307    VgCallbackClosure  closure;
   1308 
   1309    /* Make sure Vex is initialised right. */
   1310 
   1311    static Bool vex_init_done = False;
   1312 
   1313    if (!vex_init_done) {
   1314       LibVEX_Init ( &failure_exit, &log_bytes,
   1315                     1,     /* debug_paranoia */
   1316                     False, /* valgrind support */
   1317                     &VG_(clo_vex_control) );
   1318       vex_init_done = True;
   1319    }
   1320 
   1321    /* Establish the translation kind and actual guest address to
   1322       start from.  Sets (addr,kind). */
   1323    if (allow_redirection) {
   1324       Bool isWrap;
   1325       Addr64 tmp = VG_(redir_do_lookup)( nraddr, &isWrap );
   1326       if (tmp == nraddr) {
   1327          /* no redirection found */
   1328          addr = nraddr;
   1329          kind = T_Normal;
   1330       } else {
   1331          /* found a redirect */
   1332          addr = tmp;
   1333          kind = isWrap ? T_Redir_Wrap : T_Redir_Replace;
   1334       }
   1335    } else {
   1336       addr = nraddr;
   1337       kind = T_NoRedir;
   1338    }
   1339 
   1340    /* Established: (nraddr, addr, kind) */
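           /* Worked example (illustrative): if m_redir currently maps libc's
              strlen to a tool-supplied replacement, then for nraddr == &strlen
              the lookup above yields addr == &replacement and
              kind == T_Redir_Replace; a wrap-style redirect gives T_Redir_Wrap
              instead, and an unredirected address gives addr == nraddr with
              kind == T_Normal. */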
   1341 
   1342    /* Printing redirection info. */
   1343 
   1344    if ((kind == T_Redir_Wrap || kind == T_Redir_Replace)
   1345        && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
   1346       Bool ok;
   1347       Char name1[512] = "";
   1348       Char name2[512] = "";
   1349       name1[0] = name2[0] = 0;
   1350       ok = VG_(get_fnname_w_offset)(nraddr, name1, 512);
   1351       if (!ok) VG_(strcpy)(name1, "???");
   1352       ok = VG_(get_fnname_w_offset)(addr, name2, 512);
   1353       if (!ok) VG_(strcpy)(name2, "???");
   1354       VG_(message)(Vg_DebugMsg,
   1355                    "REDIR: 0x%llx (%s) redirected to 0x%llx (%s)\n",
   1356                    nraddr, name1,
   1357                    addr, name2 );
   1358    }
   1359 
   1360    if (!debugging_translation)
   1361       VG_TRACK( pre_mem_read, Vg_CoreTranslate,
   1362                               tid, "(translator)", addr, 1 );
   1363 
   1364    /* If doing any code printing, print a basic block start marker */
   1365    if (VG_(clo_trace_flags) || debugging_translation) {
   1366       Char fnname[512] = "UNKNOWN_FUNCTION";
   1367       VG_(get_fnname_w_offset)(addr, fnname, 512);
   1368       const UChar* objname = "UNKNOWN_OBJECT";
   1369       OffT         objoff  = 0;
   1370       DebugInfo*   di      = VG_(find_DebugInfo)( addr );
   1371       if (di) {
   1372          objname = VG_(DebugInfo_get_filename)(di);
   1373          objoff  = addr - VG_(DebugInfo_get_text_bias)(di);
   1374       }
   1375       vg_assert(objname);
   1376       VG_(printf)(
   1377          "==== SB %d (evchecks %lld) [tid %d] 0x%llx %s %s+0x%llx\n",
   1378          VG_(get_bbs_translated)(), bbs_done, (Int)tid, addr,
   1379          fnname, objname, (ULong)objoff
   1380       );
   1381    }
   1382 
   1383    /* Are we allowed to translate here? */
   1384 
   1385    { /* BEGIN new scope specially for 'seg' */
   1386    NSegment const* seg = VG_(am_find_nsegment)(addr);
   1387 
   1388    if ( (!translations_allowable_from_seg(seg))
   1389         || addr == TRANSTAB_BOGUS_GUEST_ADDR ) {
   1390       if (VG_(clo_trace_signals))
   1391          VG_(message)(Vg_DebugMsg, "translations not allowed here (0x%llx)"
   1392                                    " - throwing SEGV\n", addr);
   1393       /* U R busted, sonny.  Place your hands on your head and step
   1394          away from the orig_addr. */
   1395       /* Code address is bad - deliver a signal instead */
   1396       if (seg != NULL) {
   1397          /* There's some kind of segment at the requested place, but we
   1398             aren't allowed to execute code here. */
   1399          if (debugging_translation)
   1400             VG_(printf)("translations not allowed here (segment not executable)"
   1401                         "(0x%llx)\n", addr);
   1402          else
   1403             VG_(synth_fault_perms)(tid, addr);
   1404       } else {
   1405         /* There is no segment at all; we are attempting to execute in
   1406            the middle of nowhere. */
   1407          if (debugging_translation)
   1408             VG_(printf)("translations not allowed here (no segment)"
   1409                         "(0x%llx)\n", addr);
   1410          else
   1411             VG_(synth_fault_mapping)(tid, addr);
   1412       }
   1413       return False;
   1414    }
   1415 
   1416    /* True if a debug trans., or if bit N set in VG_(clo_trace_flags). */
   1417    verbosity = 0;
   1418    if (debugging_translation) {
   1419       verbosity = debugging_verbosity;
   1420    }
   1421    else
   1422    if ( (VG_(clo_trace_flags) > 0
   1423         && VG_(get_bbs_translated)() <= VG_(clo_trace_notabove)
   1424         && VG_(get_bbs_translated)() >= VG_(clo_trace_notbelow) )) {
   1425       verbosity = VG_(clo_trace_flags);
   1426    }
   1427 
   1428    /* Figure out which preamble-mangling callback to send. */
   1429    preamble_fn = NULL;
   1430    if (kind == T_Redir_Replace)
   1431       preamble_fn = mk_preamble__set_NRADDR_to_zero;
   1432    else
   1433    if (kind == T_Redir_Wrap)
   1434       preamble_fn = mk_preamble__set_NRADDR_to_nraddr;
   1435 
   1436 #  if defined(VG_PLAT_USES_PPCTOC)
   1437    if (ULong_to_Ptr(nraddr)
   1438        == (void*)&VG_(ppctoc_magic_redirect_return_stub)) {
   1439       /* If entering the special return stub, this means a wrapped or
   1440          redirected function is returning.  Make this translation one
   1441          which restores R2 and LR from the thread's hidden redir
   1442          stack, and branch to the (restored) link register, thereby
   1443          really causing the function to return. */
   1444       vg_assert(kind == T_Normal);
   1445       vg_assert(nraddr == addr);
   1446       preamble_fn = mk_preamble__ppctoc_magic_return_stub;
   1447    }
   1448 #  endif
   1449 
   1450    /* ------ Actually do the translation. ------ */
   1451    tl_assert2(VG_(tdict).tool_instrument,
   1452               "you forgot to set VgToolInterface function 'tool_instrument'");
   1453 
   1454    /* Get the CPU info established at startup. */
   1455    VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
   1456 
   1457    /* Set up 'abiinfo' structure with stuff Vex needs to know about
   1458       the guest and host ABIs. */
   1459 
   1460    LibVEX_default_VexAbiInfo( &vex_abiinfo );
   1461    vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
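           /* (On amd64-linux, for instance, this corresponds to the SysV
              ABI's 128-byte redzone below the stack pointer; architectures
              with no redzone use zero.) */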
   1462 
   1463 #  if defined(VGP_amd64_linux)
   1464    vex_abiinfo.guest_amd64_assume_fs_is_zero  = True;
   1465 #  endif
   1466 #  if defined(VGP_amd64_darwin)
   1467    vex_abiinfo.guest_amd64_assume_gs_is_0x60  = True;
   1468 #  endif
   1469 #  if defined(VGP_ppc32_linux)
   1470    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = False;
   1471    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = NULL;
   1472    vex_abiinfo.host_ppc32_regalign_int64_args = True;
   1473 #  endif
   1474 #  if defined(VGP_ppc64_linux)
   1475    vex_abiinfo.guest_ppc_zap_RZ_at_blr        = True;
   1476    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
   1477    vex_abiinfo.host_ppc_calls_use_fndescrs    = True;
   1478 #  endif
   1479 
   1480    /* Set up closure args. */
   1481    closure.tid    = tid;
   1482    closure.nraddr = nraddr;
   1483    closure.readdr = addr;
   1484 
   1485    /* Set up args for LibVEX_Translate. */
   1486    vta.arch_guest       = vex_arch;
   1487    vta.archinfo_guest   = vex_archinfo;
   1488    vta.arch_host        = vex_arch;
   1489    vta.archinfo_host    = vex_archinfo;
   1490    vta.abiinfo_both     = vex_abiinfo;
   1491    vta.callback_opaque  = (void*)&closure;
   1492    vta.guest_bytes      = (UChar*)ULong_to_Ptr(addr);
   1493    vta.guest_bytes_addr = (Addr64)addr;
   1494    vta.chase_into_ok    = chase_into_ok;
   1495    vta.guest_extents    = &vge;
   1496    vta.host_bytes       = tmpbuf;
   1497    vta.host_bytes_size  = N_TMPBUF;
   1498    vta.host_bytes_used  = &tmpbuf_used;
   1499    { /* At this point we have to reconcile Vex's view of the
   1500         instrumentation callback - which takes a void* first argument
   1501         - with Valgrind's view, in which the first arg is a
   1502       VgCallbackClosure*.  Hence the following long-winded casts.
   1503       They are entirely legal, but spelled out in full so as to maximise
   1504       the chance of the C typechecker picking up any type snafus. */
   1505      IRSB*(*f)(VgCallbackClosure*,
   1506                IRSB*,VexGuestLayout*,VexGuestExtents*,
   1507                IRType,IRType)
   1508         = VG_(clo_vgdb) != Vg_VgdbNo
   1509              ? tool_instrument_then_gdbserver_if_needed
   1510              : VG_(tdict).tool_instrument;
   1511      IRSB*(*g)(void*,
   1512                IRSB*,VexGuestLayout*,VexGuestExtents*,
   1513                IRType,IRType)
   1514        = (IRSB*(*)(void*,IRSB*,VexGuestLayout*,VexGuestExtents*,IRType,IRType))f;
   1515      vta.instrument1     = g;
   1516    }
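           /* For reference, a do-nothing instrumenter matching the f-side
              signature in the cast above looks roughly like this (it is in
              effect what the "none" tool does; an illustrative sketch only):

                 static IRSB* nl_instrument ( VgCallbackClosure* closure,
                                              IRSB* sb_in,
                                              VexGuestLayout* layout,
                                              VexGuestExtents* vge,
                                              IRType gWordTy, IRType hWordTy )
                 {
                    return sb_in;   // pass the guest IR through unchanged
                 }
           */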
   1517    /* No need for type kludgery here. */
   1518    vta.instrument2       = need_to_handle_SP_assignment()
   1519                               ? vg_SP_update_pass
   1520                               : NULL;
   1521    vta.finaltidy         = VG_(needs).final_IR_tidy_pass
   1522                               ? VG_(tdict).tool_final_IR_tidy_pass
   1523                               : NULL;
   1524    vta.needs_self_check  = needs_self_check;
   1525    vta.preamble_function = preamble_fn;
   1526    vta.traceflags        = verbosity;
   1527    vta.addProfInc        = VG_(clo_profile_flags) > 0
   1528                            && kind != T_NoRedir;
   1529 
   1530    /* Set up the dispatch continuation-point info.  If this is a
   1531       no-redir translation then it cannot be chained, and the chain-me
   1532       points are set to NULL to indicate that.  The indir point must
   1533       also be NULL, since we can't allow this translation to do an
   1534       indir transfer -- that would take it back into the main
   1535       translation cache too.
   1536 
   1537       All this is because no-redir translations live outside the main
   1538       translation cache (in a secondary one) and chaining them would
   1539       involve more administrative complexity than is worth the
   1540       hassle, because we don't expect them to get used often.  So
   1541       don't bother. */
   1542    if (allow_redirection) {
   1543       vta.disp_cp_chain_me_to_slowEP
   1544          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_slowEP) );
   1545       vta.disp_cp_chain_me_to_fastEP
   1546          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_chain_me_to_fastEP) );
   1547       vta.disp_cp_xindir
   1548          = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xindir) );
   1549    } else {
   1550       vta.disp_cp_chain_me_to_slowEP = NULL;
   1551       vta.disp_cp_chain_me_to_fastEP = NULL;
   1552       vta.disp_cp_xindir             = NULL;
   1553    }
   1554    /* This doesn't involve chaining and so is always allowable. */
   1555    vta.disp_cp_xassisted
   1556       = VG_(fnptr_to_fnentry)( &VG_(disp_cp_xassisted) );
   1557 
   1558    /* Sheesh.  Finally, actually _do_ the translation! */
   1559    tres = LibVEX_Translate ( &vta );
   1560 
   1561    vg_assert(tres.status == VexTransOK);
   1562    vg_assert(tres.n_sc_extents >= 0 && tres.n_sc_extents <= 3);
   1563    vg_assert(tmpbuf_used <= N_TMPBUF);
   1564    vg_assert(tmpbuf_used > 0);
   1565 
   1566    /* Tell aspacem of all segments that have had translations taken
   1567       from them.  Optimisation: don't re-look up vge.base[0] since seg
   1568       should already point to it. */
   1569 
   1570    vg_assert( vge.base[0] == (Addr64)addr );
   1571    /* set 'translations taken from this segment' flag */
   1572    VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
   1573    } /* END new scope specially for 'seg' */
   1574 
   1575    for (i = 1; i < vge.n_used; i++) {
   1576       NSegment const* seg
   1577          = VG_(am_find_nsegment)( vge.base[i] );
   1578       /* set 'translations taken from this segment' flag */
   1579       VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
   1580    }
   1581 
   1582    /* Copy data at trans_addr into the translation cache. */
   1583    vg_assert(tmpbuf_used > 0 && tmpbuf_used < 65536);
   1584 
   1585    // If debugging, don't do anything with the translated block;  we
   1586    // only did this for the debugging output produced along the way.
   1587    if (!debugging_translation) {
   1588 
   1589       if (kind != T_NoRedir) {
   1590           // Put it into the normal TT/TC structures.  This is the
   1591           // normal case.
   1592 
   1593           // Note that we use nraddr (the non-redirected address), not
   1594           // addr, which might have been changed by the redirection
   1595           VG_(add_to_transtab)( &vge,
   1596                                 nraddr,
   1597                                 (Addr)(&tmpbuf[0]),
   1598                                 tmpbuf_used,
   1599                                 tres.n_sc_extents > 0,
   1600                                 tres.offs_profInc,
   1601                                 tres.n_guest_instrs,
   1602                                 vex_arch );
   1603       } else {
   1604           vg_assert(tres.offs_profInc == -1); /* -1 == unset */
   1605           VG_(add_to_unredir_transtab)( &vge,
   1606                                         nraddr,
   1607                                         (Addr)(&tmpbuf[0]),
   1608                                         tmpbuf_used );
   1609       }
   1610    }
   1611 
   1612    return True;
   1613 }
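        /* Typical use (illustrative; the real call sites live elsewhere in
           the core, e.g. in the scheduler when no translation exists for the
           current guest IP):

              Bool ok = VG_(translate)( tid, ip,
                                        False,    // not a debugging translation
                                        0,        // debugging_verbosity: unused
                                        bbs_done,
                                        True );   // allow redirection; normal TT/TC

           A False result means translation was refused at 'ip', in which case
           a fault has already been synthesised for the thread. */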
   1614 
   1615 /*--------------------------------------------------------------------*/
   1616 /*--- end                                                          ---*/
   1617 /*--------------------------------------------------------------------*/
   1618