
/*---------------------------------------------------------------*/
/*--- begin                               guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2010 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emwarn.h"
#include "libvex_guest_arm.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_arm_defs.h"


/* This file contains helper functions for arm guest code.  Calls to
   these functions are generated by the back end.  These calls are of
   course in the host machine code and this file will be compiled to
   host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest-arm/toIR.c.
*/



/* generalised left-shifter */
static inline UInt lshift ( UInt x, Int n )
{
   if (n >= 0)
      return x << n;
   else
      return x >> (-n);
}
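
/* For example (illustrative): lshift(1, 31) == 0x80000000, while a
   negative count shifts right instead, so lshift(0x80000000, -31) == 1.
   This lets the flag calculations below move a bit to any target
   position, in either direction, with a single helper. */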


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate NZCV from the supplied thunk components, in the positions
   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
   Returned bits 27:0 are zero. */
UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
                                 UInt cc_dep2, UInt cc_dep3 )
{
   switch (cc_op) {
      case ARMG_CC_OP_COPY:
         /* (nzcv, unused, unused) */
         return cc_dep1;
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         // CF: unsigned carry out of the addition; VF: signed overflow
         UInt cf   = lshift( res < argL, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( (res ^ argL) & (res ^ argR),
                             ARMG_CC_SHIFT_V + 1 - 32 )
                     & ARMG_CC_MASK_V;
         //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
         //           argL, argR, nf, zf, cf, vf);
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         // ARM carry after SUB is NOT(borrow), hence >= rather than <
         UInt cf   = lshift( argL >= argR, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( (argL ^ argR) & (argL ^ res),
                             ARMG_CC_SHIFT_V + 1 - 32 )
                     & ARMG_CC_MASK_V;
         //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
         //           argL, argR, nf, zf, cf, vf);
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         UInt res  = (argL + argR) + oldC;
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         UInt cf   = oldC ? lshift( res <= argL, ARMG_CC_SHIFT_C )
                          : lshift( res <  argL, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( (res ^ argL) & (res ^ argR),
                             ARMG_CC_SHIFT_V + 1 - 32 )
                     & ARMG_CC_MASK_V;
         //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
         //           argL, argR, nf, zf, cf, vf);
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         UInt res  = argL - argR - (oldC ^ 1);
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         UInt cf   = oldC ? lshift( argL >= argR, ARMG_CC_SHIFT_C )
                          : lshift( argL >  argR, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( (argL ^ argR) & (argL ^ res),
                             ARMG_CC_SHIFT_V + 1 - 32 )
                     & ARMG_CC_MASK_V;
         //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
         //           argL, argR, nf, zf, cf, vf);
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt shco = cc_dep2;
         UInt oldV = cc_dep3;
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         UInt cf   = lshift( shco & 1, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( oldV & 1, ARMG_CC_SHIFT_V );
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt oldC = (cc_dep3 >> 1) & 1;
         UInt oldV = (cc_dep3 >> 0) & 1;
         UInt nf   = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf   = lshift( res == 0, ARMG_CC_SHIFT_Z );
         UInt cf   = lshift( oldC & 1, ARMG_CC_SHIFT_C );
         UInt vf   = lshift( oldV & 1, ARMG_CC_SHIFT_V );
         return nf | zf | cf | vf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resLo32 = cc_dep1;
         UInt resHi32 = cc_dep2;
         UInt oldC    = (cc_dep3 >> 1) & 1;
         UInt oldV    = (cc_dep3 >> 0) & 1;
         UInt nf      = lshift( resHi32 & (1<<31), ARMG_CC_SHIFT_N - 31 );
         UInt zf      = lshift( (resHi32|resLo32) == 0, ARMG_CC_SHIFT_Z );
         UInt cf      = lshift( oldC & 1, ARMG_CC_SHIFT_C );
         UInt vf      = lshift( oldV & 1, ARMG_CC_SHIFT_V );
         return nf | zf | cf | vf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flags_nzcv"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flags_nzcv");
   }
}
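
/* Illustrative sketch (not part of the build): how the thunk scheme
   plays out for a SUB.  After "cmp r0, r1" with r0 = 5 and r1 = 7,
   the thunk holds (ARMG_CC_OP_SUB, 5, 7, unused); 5 - 7 wraps to
   0xFFFFFFFE with a borrow, giving N=1 Z=0 C=0 V=0.  The operand
   values are made up for the example. */
#if 0
static void example_sub_flags ( void )
{
   UInt nzcv = armg_calculate_flags_nzcv( ARMG_CC_OP_SUB,
                                          5/*argL*/, 7/*argR*/,
                                          0/*unused*/ );
   vassert(nzcv == ARMG_CC_MASK_N);
}
#endif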


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the C flag from the thunk components, in the lowest bit
   of the word (bit 0). */
UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
   UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
   return (r >> ARMG_CC_SHIFT_C) & 1;
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the V flag from the thunk components, in the lowest bit
   of the word (bit 0). */
UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
   UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
   return (r >> ARMG_CC_SHIFT_V) & 1;
}

/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the QC flag from the arguments, in the lowest bit
   of the word (bit 0).  Urr, having this out of line is bizarre.
   Push back inline. */
UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
                              UInt resR1, UInt resR2 )
{
   if (resL1 != resR1 || resL2 != resR2)
      return 1;
   else
      return 0;
}
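
/* For example (illustrative): for a saturating Neon operation the
   front end can pass the saturated 64-bit result in (resL1,resL2)
   and the same computation done without saturation in (resR1,resR2);
   any difference means saturation occurred, so QC must be set. */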

/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the specified condition from the thunk components, in the
   lowest bit of the word (bit 0). */
extern
UInt armg_calculate_condition ( UInt cond_n_op /* ARMCondcode << 4 | cc_op */,
                                UInt cc_dep1,
                                UInt cc_dep2, UInt cc_dep3 )
{
   UInt cond  = cond_n_op >> 4;
   UInt cc_op = cond_n_op & 0xF;
   UInt nf, zf, vf, cf, nzcv, inv;
   //   vex_printf("XXXXXXXX %x %x %x %x\n",
   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);

   // skip flags computation in this case
   if (cond == ARMCondAL) return 1;

   inv  = cond & 1;
   nzcv = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);

   switch (cond) {
      case ARMCondEQ:    // Z=1         => z
      case ARMCondNE:    // Z=0
         zf = nzcv >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ zf);

      case ARMCondHS:    // C=1         => c
      case ARMCondLO:    // C=0
         cf = nzcv >> ARMG_CC_SHIFT_C;
         return 1 & (inv ^ cf);

      case ARMCondMI:    // N=1         => n
      case ARMCondPL:    // N=0
         nf = nzcv >> ARMG_CC_SHIFT_N;
         return 1 & (inv ^ nf);

      case ARMCondVS:    // V=1         => v
      case ARMCondVC:    // V=0
         vf = nzcv >> ARMG_CC_SHIFT_V;
         return 1 & (inv ^ vf);

      case ARMCondHI:    // C=1 && Z=0   => c & ~z
      case ARMCondLS:    // C=0 || Z=1
         cf = nzcv >> ARMG_CC_SHIFT_C;
         zf = nzcv >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ (cf & ~zf));

      case ARMCondGE:    // N=V          => ~(n^v)
      case ARMCondLT:    // N!=V
         nf = nzcv >> ARMG_CC_SHIFT_N;
         vf = nzcv >> ARMG_CC_SHIFT_V;
         return 1 & (inv ^ ~(nf ^ vf));

      case ARMCondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
      case ARMCondLE:    // Z=1 || N!=V
         nf = nzcv >> ARMG_CC_SHIFT_N;
         vf = nzcv >> ARMG_CC_SHIFT_V;
         zf = nzcv >> ARMG_CC_SHIFT_Z;
         return 1 & (inv ^ ~(zf | (nf ^ vf)));

      case ARMCondAL: // handled above
      case ARMCondNV: // should never get here: Illegal instr
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_condition(ARM)"
                    "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_condition(ARM)");
   }
}
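
/* Example (illustrative): the front end asks "is GE true after a SUB"
   by calling
      armg_calculate_condition( (ARMCondGE << 4) | ARMG_CC_OP_SUB,
                                argL, argR, 0 );
   since ARMCondLT == ARMCondGE | 1, the low bit of the condition
   number selects between each test and its inverse via 'inv' above. */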


/*---------------------------------------------------------------*/
/*--- Flag-helpers translation-time function specialisers.    ---*/
/*--- These help iropt specialise calls to the above run-time ---*/
/*--- flags functions.                                        ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */

static Bool isU32 ( IRExpr* e, UInt n )
{
   return
      toBool( e->tag == Iex_Const
              && e->Iex.Const.con->tag == Ico_U32
              && e->Iex.Const.con->Ico.U32 == n );
}
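
/* For example (illustrative): isU32(e, 0xE0) holds exactly when e is
   the IR constant 0xE0:I32. */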

IRExpr* guest_arm_spechelper ( HChar*   function_name,
                               IRExpr** args,
                               IRStmt** precedingStmts,
                               Int      n_precedingStmts )
{
#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;
#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif

   /* --------- specialising "armg_calculate_condition" --------- */

   if (vex_streq(function_name, "armg_calculate_condition")) {
      /* specialise calls to the above "armg_calculate_condition"
         function */
      IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_dep3;
      vassert(arity == 4);
      cond_n_op = args[0]; /* ARMCondcode << 4  |  ARMG_CC_OP_* */
      cc_dep1   = args[1];
      cc_dep2   = args[2];
      cc_dep3   = args[3];

      /*---------------- SUB ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
         /* EQ after SUB --> test argL == argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
      }
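      /* For instance (illustrative): "cmp r0, r1 ; beq ..." arrives
         here as armg_calculate_condition( (ARMCondEQ << 4)
         | ARMG_CC_OP_SUB, r0, r1, 0 ), and the rewrite above replaces
         the whole helper call with 1Uto32(CmpEQ32(r0, r1)). */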
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
         /* NE after SUB --> test argL != argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
         /* LE after SUB --> test argL <=s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
         /* LT after SUB --> test argL <s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
         /* GE after SUB --> test argL >=s argR
                         --> test argR <=s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
      }

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
         /* HS after SUB --> test argL >=u argR
                         --> test argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }

      if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
         /* LS after SUB --> test argL <=u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
      }

      /*---------------- LOGIC ----------------*/
      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
         /* EQ after LOGIC --> test res == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
         /* NE after LOGIC --> test res != 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
      }

      /*----------------- AL -----------------*/
      /* A critically important case for Thumb code.

         What we're trying to spot is the case where cond_n_op is an
         expression of the form Or32(..., 0xE0) since that means the
         caller is asking for CondAL and we can simply return 1
         without caring what the ... part is.  This is a potentially
         dodgy kludge in that it assumes that the ... part has zeroes
         in bits 7:4, so that the result of the Or32 is guaranteed to
         be 0xE in bits 7:4.  Given that the places where this first
         arg is constructed (in guest_arm_toIR.c) are very
         constrained, we can get away with this.  To make this
         guaranteed safe would require a new primop, Slice44 or some
         such, thusly

         Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]

         and we would then look for Slice44(0xE0, ...)
         which would give the required safety property.

         It would be infeasibly expensive to scan backwards through
         the entire block looking for an assignment to the temp, so
         just look at the previous 16 statements.  That should find it
         if it is an interesting case, as a result of how the
         boilerplate guff at the start of each Thumb insn translation
         is made.
      */
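      /* A C model of that hypothetical primop might look like this
         (sketch only; no such IROp currently exists):

            UInt Slice44 ( UInt arg1, UInt arg2 ) {
               return (arg1 & 0xF0) | (arg2 & 0x0F);
            }

         Matching Slice44(0xE0, ...) would then be safe no matter
         what bits 7:4 of the second argument contain. */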
      if (cond_n_op->tag == Iex_RdTmp) {
         Int    j;
         IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
         Int    limit    = n_precedingStmts - 16;
         if (limit < 0) limit = 0;
         if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
         for (j = n_precedingStmts - 1; j >= limit; j--) {
            IRStmt* st = precedingStmts[j];
            if (st->tag == Ist_WrTmp
                && st->Ist.WrTmp.tmp == look_for
                && st->Ist.WrTmp.data->tag == Iex_Binop
                && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
                && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
               return mkU32(1);
         }
         /* Didn't find any useful binding to the first arg
            in the previous 16 stmts. */
      }
   }

#  undef unop
#  undef binop
#  undef mkU32
#  undef mkU8

   return NULL;
}


/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
#if 0
void LibVEX_GuestARM_put_flags ( UInt flags_native,
                                 /*OUT*/VexGuestARMState* vex_state )
{
   vassert(0); // FIXME

   /* Mask out everything except N Z V C. */
   flags_native
      &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = flags_native;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
}
#endif

/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestARM_get_cpsr ( /*IN*/VexGuestARMState* vex_state )
{
   UInt cpsr = 0;
   // NZCV
   cpsr |= armg_calculate_flags_nzcv(
               vex_state->guest_CC_OP,
               vex_state->guest_CC_DEP1,
               vex_state->guest_CC_DEP2,
               vex_state->guest_CC_NDEP
            );
   vassert(0 == (cpsr & 0x0FFFFFFF));
   // Q
   if (vex_state->guest_QFLAG32 > 0)
      cpsr |= (1 << 27);
   // GE
   if (vex_state->guest_GEFLAG0 > 0)
      cpsr |= (1 << 16);
   if (vex_state->guest_GEFLAG1 > 0)
      cpsr |= (1 << 17);
   if (vex_state->guest_GEFLAG2 > 0)
      cpsr |= (1 << 18);
   if (vex_state->guest_GEFLAG3 > 0)
      cpsr |= (1 << 19);
   // M
   cpsr |= (1 << 4); // 0b10000 means user-mode
   // J,T   J (bit 24) is zero by initialisation above
   // T  we copy from R15T[0]
   if (vex_state->guest_R15T & 1)
      cpsr |= (1 << 5);
   // ITSTATE we punt on for the time being.  Could compute it
   // if needed though.
   // E, endianness, 0 (little-endian) from initialisation above
   // A,I,F disable some async exceptions.  Not sure about these.
   // Leave as zero for the time being.
   return cpsr;
}
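
/* For reference (illustrative summary of the bits assembled above):

      31 30 29 28   27     19..16    5      4..0
      N  Z  C  V    Q      GE[3:0]   T      10000 (User mode)

   with J, ITSTATE, E, A, I and F all left at zero. */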

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
{
   vex_state->guest_R0  = 0;
   vex_state->guest_R1  = 0;
   vex_state->guest_R2  = 0;
   vex_state->guest_R3  = 0;
   vex_state->guest_R4  = 0;
   vex_state->guest_R5  = 0;
   vex_state->guest_R6  = 0;
   vex_state->guest_R7  = 0;
   vex_state->guest_R8  = 0;
   vex_state->guest_R9  = 0;
   vex_state->guest_R10 = 0;
   vex_state->guest_R11 = 0;
   vex_state->guest_R12 = 0;
   vex_state->guest_R13 = 0;
   vex_state->guest_R14 = 0;
   vex_state->guest_R15T = 0;  /* NB: implies ARM mode */

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = 0;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
   vex_state->guest_QFLAG32 = 0;
   vex_state->guest_GEFLAG0 = 0;
   vex_state->guest_GEFLAG1 = 0;
   vex_state->guest_GEFLAG2 = 0;
   vex_state->guest_GEFLAG3 = 0;

   vex_state->guest_EMWARN  = 0;
   vex_state->guest_TISTART = 0;
   vex_state->guest_TILEN   = 0;
   vex_state->guest_NRADDR  = 0;
   vex_state->guest_IP_AT_SYSCALL = 0;

   vex_state->guest_D0  = 0;
   vex_state->guest_D1  = 0;
   vex_state->guest_D2  = 0;
   vex_state->guest_D3  = 0;
   vex_state->guest_D4  = 0;
   vex_state->guest_D5  = 0;
   vex_state->guest_D6  = 0;
   vex_state->guest_D7  = 0;
   vex_state->guest_D8  = 0;
   vex_state->guest_D9  = 0;
   vex_state->guest_D10 = 0;
   vex_state->guest_D11 = 0;
   vex_state->guest_D12 = 0;
   vex_state->guest_D13 = 0;
   vex_state->guest_D14 = 0;
   vex_state->guest_D15 = 0;
   vex_state->guest_D16 = 0;
   vex_state->guest_D17 = 0;
   vex_state->guest_D18 = 0;
   vex_state->guest_D19 = 0;
   vex_state->guest_D20 = 0;
   vex_state->guest_D21 = 0;
   vex_state->guest_D22 = 0;
   vex_state->guest_D23 = 0;
   vex_state->guest_D24 = 0;
   vex_state->guest_D25 = 0;
   vex_state->guest_D26 = 0;
   vex_state->guest_D27 = 0;
   vex_state->guest_D28 = 0;
   vex_state->guest_D29 = 0;
   vex_state->guest_D30 = 0;
   vex_state->guest_D31 = 0;

   /* ARM encoded; zero is the default as it happens (result flags
      (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
      all exns masked, all exn sticky bits cleared). */
   vex_state->guest_FPSCR = 0;

   vex_state->guest_TPIDRURO = 0;

   /* Not in a Thumb IT block. */
   vex_state->guest_ITSTATE = 0;

   vex_state->padding1 = 0;
   vex_state->padding2 = 0;
   vex_state->padding3 = 0;
}


/*-----------------------------------------------------------*/
/*--- Describing the arm guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest R13(sp) and R15T(pc), and also
   for R11 and R7 (see below), which are needed for accurate
   stacktraces.
*/
Bool guest_arm_state_requires_precise_mem_exns ( Int minoff,
                                                 Int maxoff)
{
   Int sp_min = offsetof(VexGuestARMState, guest_R13);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestARMState, guest_R15T);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of R11 in order to get proper
      stacktraces from non-optimised code. */
   Int r11_min = offsetof(VexGuestARMState, guest_R11);
   Int r11_max = r11_min + 4 - 1;

   if (maxoff < r11_min || minoff > r11_max) {
      /* no overlap with r11 */
   } else {
      return True;
   }

   /* Ditto R7, particularly needed for proper stacktraces in Thumb
      code. */
   Int r7_min = offsetof(VexGuestARMState, guest_R7);
   Int r7_max = r7_min + 4 - 1;

   if (maxoff < r7_min || minoff > r7_max) {
      /* no overlap with r7 */
   } else {
      return True;
   }

   return False;
}
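
/* For example (illustrative): a 4-byte Put to guest_R13 has
   minoff == offsetof(VexGuestARMState, guest_R13) and
   maxoff == minoff + 3, which overlaps [sp_min, sp_max], so the
   function returns True and updates of sp are kept precise. */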



#define ALWAYSDEFD(field)                           \
    { offsetof(VexGuestARMState, field),            \
      (sizeof ((VexGuestARMState*)0)->field) }
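
/* For example (illustrative): ALWAYSDEFD(guest_CC_OP) expands to
      { offsetof(VexGuestARMState, guest_CC_OP),
        (sizeof ((VexGuestARMState*)0)->guest_CC_OP) }
   i.e. an (offset, size-in-bytes) pair describing that field. */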

VexGuestLayout
   armGuest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestARMState),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestARMState,guest_R13),
          .sizeof_SP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestARMState,guest_R15T),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 10,

          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
             have to be tracked.  See detailed comment in gdefs.h on
             meaning of thunk fields. */
          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD(guest_R15T),
                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
                 /* 3 */ ALWAYSDEFD(guest_EMWARN),
                 /* 4 */ ALWAYSDEFD(guest_TISTART),
                 /* 5 */ ALWAYSDEFD(guest_TILEN),
                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
                 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
                 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
               }
        };


/*---------------------------------------------------------------*/
/*--- end                                 guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/