
/*---------------------------------------------------------------*/
/*--- begin                               guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emwarn.h"
#include "libvex_guest_arm.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_arm_defs.h"


/* This file contains helper functions for arm guest code.  Calls to
   these functions are generated by the back end.  These calls are of
   course in the host machine code and this file will be compiled to
   host machine code, so that all makes sense.

   Only change the signatures of these helper functions very
   carefully.  If you change the signature here, you'll have to change
   the parameters passed to it in the IR calls constructed by
   guest_arm_toIR.c.
*/
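
/* A note on the thunk scheme, for orientation: the guest state keeps
   the condition codes lazily, as a thunk (guest_CC_OP, guest_CC_DEP1,
   guest_CC_DEP2, guest_CC_NDEP); guest_arm_defs.h has the full story.
   CC_OP records which operation most recently set the flags, and the
   per-case comments below of the form (argL, argR, oldC) document how
   that operation interprets the three data words handed to these
   helpers. */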


/* Set to 1 to get detailed profiling info about individual N, Z, C
   and V flag evaluation. */
#define PROFILE_NZCV_FLAGS 0

#if PROFILE_NZCV_FLAGS

static UInt tab_n_eval[ARMG_CC_OP_NUMBER];
static UInt tab_z_eval[ARMG_CC_OP_NUMBER];
static UInt tab_c_eval[ARMG_CC_OP_NUMBER];
static UInt tab_v_eval[ARMG_CC_OP_NUMBER];
static UInt initted = 0;
static UInt tot_evals = 0;

static void initCounts ( void )
{
   UInt i;
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      tab_n_eval[i] = tab_z_eval[i] = tab_c_eval[i] = tab_v_eval[i] = 0;
   }
   initted = 1;
}

static void showCounts ( void )
{
   UInt i;
   vex_printf("\n                 N          Z          C          V\n");
   vex_printf(  "---------------------------------------------------\n");
   for (i = 0; i < ARMG_CC_OP_NUMBER; i++) {
      vex_printf("CC_OP=%d  %9d  %9d  %9d  %9d\n",
                 i,
                 tab_n_eval[i], tab_z_eval[i],
                 tab_c_eval[i], tab_v_eval[i] );
   }
}

#define NOTE_N_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_n_eval)
#define NOTE_Z_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_z_eval)
#define NOTE_C_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_c_eval)
#define NOTE_V_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_v_eval)

#define NOTE_EVAL(_cc_op, _tab) \
   do { \
      if (!initted) initCounts(); \
      vassert( ((UInt)(_cc_op)) < ARMG_CC_OP_NUMBER); \
      _tab[(UInt)(_cc_op)]++; \
      tot_evals++; \
      if (0 == (tot_evals & 0xFFFFF)) \
        showCounts(); \
   } while (0)

#endif /* PROFILE_NZCV_FLAGS */


/* Calculate the N flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_n ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_N_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt nf   = (cc_dep1 >> ARMG_CC_SHIFT_N) & 1;
         return nf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt nf   = res >> 31;
         return nf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resHi32 = cc_dep2;
         UInt nf      = resHi32 >> 31;
         return nf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_n"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_n");
   }
}


/* Calculate the Z flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
static
UInt armg_calculate_flag_z ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_Z_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt zf   = (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1;
         return zf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt res  = cc_dep1;
         UInt zf   = res == 0;
         return zf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt resLo32 = cc_dep1;
         UInt resHi32 = cc_dep2;
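         /* The 64-bit result is zero iff both 32-bit halves are
            zero; OR-ing the halves avoids a 64-bit compare. */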
         UInt zf      = (resHi32|resLo32) == 0;
         return zf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_z"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_z");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the C flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_C_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt cf   = (cc_dep1 >> ARMG_CC_SHIFT_C) & 1;
         return cf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
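         /* Carry-out of a 32-bit unsigned add shows up as wraparound:
            the truncated sum is below either operand iff the add
            overflowed, e.g. 0xFFFFFFFF + 2 gives res = 1 < argL. */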
         UInt cf   = res < argL;
         return cf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt cf   = argL >= argR;
         return cf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
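         /* With a carry-in, the sum can wrap to exactly argL (e.g.
            argR = 0xFFFFFFFF, oldC = 1), so the carry-out test is
            res <= argL rather than a strict inequality. */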
         UInt cf   = oldC ? (res <= argL) : (res < argL);
         return cf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
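         /* ARM's C after a subtract-with-borrow is NOT(borrow): the
            computation argL - argR - (oldC ^ 1) stays in range iff
            argL >= argR + (oldC ^ 1), which is what the ternary
            below tests. */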
         UInt cf   = oldC ? (argL >= argR) : (argL > argR);
         return cf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt shco = cc_dep2;
         vassert((shco & ~1) == 0);
         UInt cf   = shco;
         return cf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldC = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf   = oldC;
         return cf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldC    = (cc_dep3 >> 1) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt cf      = oldC;
         return cf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_c"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_c");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the V flag from the supplied thunk components, in the
   least significant bit of the word.  Returned bits 31:1 are zero. */
UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
                             UInt cc_dep2, UInt cc_dep3 )
{
#  if PROFILE_NZCV_FLAGS
   NOTE_V_EVAL(cc_op);
#  endif

   switch (cc_op) {
      case ARMG_CC_OP_COPY: {
         /* (nzcv:28x0, unused, unused) */
         UInt vf   = (cc_dep1 >> ARMG_CC_SHIFT_V) & 1;
         return vf;
      }
      case ARMG_CC_OP_ADD: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL + argR;
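         /* Signed overflow on add: both operands have the same sign
            and the sum's sign differs, in which case bit 31 is set
            in both (res ^ argL) and (res ^ argR).  E.g. 0x7FFFFFFF + 1
            gives res = 0x80000000 and hence vf = 1. */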
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SUB: {
         /* (argL, argR, unused) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt res  = argL - argR;
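         /* Signed overflow on subtract: the operands have different
            signs and the result's sign differs from argL's.  E.g.
            0x80000000 - 1 gives res = 0x7FFFFFFF and hence vf = 1. */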
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_ADC: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL + argR + oldC;
         UInt vf   = ((res ^ argL) & (res ^ argR)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_SBB: {
         /* (argL, argR, oldC) */
         UInt argL = cc_dep1;
         UInt argR = cc_dep2;
         UInt oldC = cc_dep3;
         vassert((oldC & ~1) == 0);
         UInt res  = argL - argR - (oldC ^ 1);
         UInt vf   = ((argL ^ argR) & (argL ^ res)) >> 31;
         return vf;
      }
      case ARMG_CC_OP_LOGIC: {
         /* (res, shco, oldV) */
         UInt oldV = cc_dep3;
         vassert((oldV & ~1) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MUL: {
         /* (res, unused, oldC:oldV) */
         UInt oldV = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf   = oldV;
         return vf;
      }
      case ARMG_CC_OP_MULL: {
         /* (resLo32, resHi32, oldC:oldV) */
         UInt oldV    = (cc_dep3 >> 0) & 1;
         vassert((cc_dep3 & ~3) == 0);
         UInt vf      = oldV;
         return vf;
      }
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_flag_v"
                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_flag_v");
   }
}


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate NZCV from the supplied thunk components, in the positions
   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
   Returned bits 27:0 are zero. */
UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
                                 UInt cc_dep2, UInt cc_dep3 )
{
   UInt f;
   UInt res = 0;
   f = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_N);
   f = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_Z);
   f = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_C);
   f = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
   res |= (f << ARMG_CC_SHIFT_V);
   return res;
}
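
/* A worked example (illustrative only, hence not compiled in): after
   a CMP of 5 against 7 the thunk is (ARMG_CC_OP_SUB, 5, 7, -).  The
   result 0xFFFFFFFE gives N=1, Z=0, C=0 (a borrow occurred) and V=0,
   so the packed word is just the N bit. */
#if 0
static void example_nzcv_after_cmp ( void )
{
   UInt nzcv = armg_calculate_flags_nzcv(ARMG_CC_OP_SUB, 5, 7, 0);
   vassert(nzcv == ARMG_CC_MASK_N);
}
#endif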


/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the QC flag from the arguments, in the lowest bit
   of the word (bit 0).  Urr, having this out of line is bizarre.
   Push back inline. */
UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
                              UInt resR1, UInt resR2 )
{
   if (resL1 != resR1 || resL2 != resR2)
      return 1;
   else
      return 0;
}

/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* Calculate the specified condition from the thunk components, in the
   lowest bit of the word (bit 0).  Returned bits 31:1 are zero. */
UInt armg_calculate_condition ( UInt cond_n_op /* (ARMCondcode << 4) | cc_op */,
                                UInt cc_dep1,
                                UInt cc_dep2, UInt cc_dep3 )
{
   UInt cond  = cond_n_op >> 4;
   UInt cc_op = cond_n_op & 0xF;
   UInt nf, zf, vf, cf, inv;
   //   vex_printf("XXXXXXXX %x %x %x %x\n",
   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);

   // skip flags computation in this case
   if (cond == ARMCondAL) return 1;

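   /* ARM conditions come in complementary pairs (EQ/NE, HS/LO, etc)
      that differ only in bit 0 of the encoding, so each pair is
      computed once below and XORed with 'inv' to negate the odd
      member of the pair. */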
   inv  = cond & 1;

   switch (cond) {
      case ARMCondEQ:    // Z=1         => z
      case ARMCondNE:    // Z=0
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ zf;

      case ARMCondHS:    // C=1         => c
      case ARMCondLO:    // C=0
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ cf;

      case ARMCondMI:    // N=1         => n
      case ARMCondPL:    // N=0
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ nf;

      case ARMCondVS:    // V=1         => v
      case ARMCondVC:    // V=0
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ vf;

      case ARMCondHI:    // C=1 && Z=0   => c & ~z
      case ARMCondLS:    // C=0 || Z=1
         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (cf & ~zf);

      case ARMCondGE:    // N=V          => ~(n^v)
      case ARMCondLT:    // N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(nf ^ vf));

      case ARMCondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
      case ARMCondLE:    // Z=1 || N!=V
         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
         return inv ^ (1 & ~(zf | (nf ^ vf)));

      case ARMCondAL: // handled above
      case ARMCondNV: // should never get here: Illegal instr
      default:
         /* shouldn't really make these calls from generated code */
         vex_printf("armg_calculate_condition(ARM)"
                    "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
         vpanic("armg_calculate_condition(ARM)");
   }
}
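
/* Another worked example (illustrative only, hence not compiled in):
   evaluating a condition straight from the CMP-5-against-7 thunk used
   above.  Signed less-than holds, since N=1 and V=0 disagree. */
#if 0
static void example_condition_lt ( void )
{
   UInt r = armg_calculate_condition((ARMCondLT << 4) | ARMG_CC_OP_SUB,
                                     5, 7, 0);
   vassert(r == 1);
}
#endif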


/*---------------------------------------------------------------*/
/*--- Flag-helpers translation-time function specialisers.    ---*/
/*--- These help iropt specialise calls the above run-time    ---*/
/*--- flags functions.                                        ---*/
/*---------------------------------------------------------------*/

/* Used by the optimiser to try specialisations.  Returns an
   equivalent expression, or NULL if none. */

static Bool isU32 ( IRExpr* e, UInt n )
{
   return
      toBool( e->tag == Iex_Const
              && e->Iex.Const.con->tag == Ico_U32
              && e->Iex.Const.con->Ico.U32 == n );
}

IRExpr* guest_arm_spechelper ( HChar*   function_name,
                               IRExpr** args,
                               IRStmt** precedingStmts,
                               Int      n_precedingStmts )
{
#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))

   Int i, arity = 0;
   for (i = 0; args[i]; i++)
      arity++;
#  if 0
   vex_printf("spec request:\n");
   vex_printf("   %s  ", function_name);
   for (i = 0; i < arity; i++) {
      vex_printf("  ");
      ppIRExpr(args[i]);
   }
   vex_printf("\n");
#  endif

   /* --------- specialising "armg_calculate_condition" --------- */

   if (vex_streq(function_name, "armg_calculate_condition")) {

      /* specialise calls to the "armg_calculate_condition" function.
         Not sure whether this is strictly necessary, but: the
         replacement IR must produce only the values 0 or 1.  Bits
         31:1 are required to be zero. */
      IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cond_n_op = args[0]; /* (ARMCondcode << 4)  |  ARMG_CC_OP_* */
      cc_dep1   = args[1];
      cc_dep2   = args[2];
      cc_ndep   = args[3];

      /*---------------- SUB ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
         /* EQ after SUB --> test argL == argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
         /* NE after SUB --> test argL != argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_SUB)) {
         /* GT after SUB --> test argL >s argR
                         --> test argR <s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
         /* LE after SUB --> test argL <=s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
         /* LT after SUB --> test argL <s argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
         /* GE after SUB --> test argL >=s argR
                         --> test argR <=s argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
      }

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
         /* HS after SUB --> test argL >=u argR
                         --> test argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }
      if (isU32(cond_n_op, (ARMCondLO << 4) | ARMG_CC_OP_SUB)) {
         /* LO after SUB --> test argL <u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
      }

      if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
         /* LS after SUB --> test argL <=u argR */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
      }
      if (isU32(cond_n_op, (ARMCondHI << 4) | ARMG_CC_OP_SUB)) {
         /* HI after SUB --> test argL >u argR
                         --> test argR <u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLT32U, cc_dep2, cc_dep1));
      }

      /*---------------- SBB ----------------*/

      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SBB)) {
         /* This seems to happen a lot in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* HS after SBB (same as C after SBB below)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_Mux0X(
               unop(Iop_32to8, cc_ndep),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1))
            );
      }

      /*---------------- LOGIC ----------------*/

      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
         /* EQ after LOGIC --> test res == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
         /* NE after LOGIC --> test res != 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
      }

      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_LOGIC)) {
         /* PL after LOGIC --> test (res >> 31) == 0 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(0)));
      }
      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_LOGIC)) {
         /* MI after LOGIC --> test (res >> 31) == 1 */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpEQ32,
                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
                           mkU32(1)));
      }

      /*---------------- COPY ----------------*/

      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_COPY)) {
         /* NE after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_Z) ^ 1) & 1 */
         return binop(Iop_And32,
                      binop(Iop_Xor32,
                            binop(Iop_Shr32, cc_dep1,
                                             mkU8(ARMG_CC_SHIFT_Z)),
                            mkU32(1)),
                      mkU32(1));
      }

      /*----------------- AL -----------------*/

      /* A critically important case for Thumb code.

         What we're trying to spot is the case where cond_n_op is an
         expression of the form Or32(..., 0xE0) since that means the
         caller is asking for CondAL and we can simply return 1
         without caring what the ... part is.  This is a potentially
         dodgy kludge in that it assumes that the ... part has zeroes
         in bits 7:4, so that the result of the Or32 is guaranteed to
         be 0xE in bits 7:4.  Given that the places where this first
         arg is constructed (in guest_arm_toIR.c) are very
         constrained, we can get away with this.  To make this
         guaranteed safe would require a new primop, Slice44
         or some such, thusly

         Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]

         and we would then look for Slice44(0xE0, ...)
         which would give the required safety property.

         It would be infeasibly expensive to scan backwards through
         the entire block looking for an assignment to the temp, so
         just look at the previous 16 statements.  That should find it
         if it is an interesting case, as a result of how the
         boilerplate guff at the start of each Thumb insn translation
         is made.
      */
      if (cond_n_op->tag == Iex_RdTmp) {
         Int    j;
         IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
         Int    limit    = n_precedingStmts - 16;
         if (limit < 0) limit = 0;
         if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
         for (j = n_precedingStmts - 1; j >= limit; j--) {
            IRStmt* st = precedingStmts[j];
            if (st->tag == Ist_WrTmp
                && st->Ist.WrTmp.tmp == look_for
                && st->Ist.WrTmp.data->tag == Iex_Binop
                && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
                && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
               return mkU32(1);
         }
         /* Didn't find any useful binding to the first arg
            in the previous 16 stmts. */
      }
   }

   /* --------- specialising "armg_calculate_flag_c" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_c")) {

      /* specialise calls to the "armg_calculate_flag_c" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* C after LOGIC --> shco */
         return cc_dep2;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* C after SUB --> argL >=u argR
                        --> argR <=u argL */
         return unop(Iop_1Uto32,
                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* C after SBB (same as HS after SBB above)
            --> oldC ? (argL >=u argR) : (argL >u argR)
            --> oldC ? (argR <=u argL) : (argR <u argL)
         */
         return
            IRExpr_Mux0X(
               unop(Iop_32to8, cc_ndep),
               /* case oldC == 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1)),
               /* case oldC != 0 */
               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1))
            );
      }

   }

   /* --------- specialising "armg_calculate_flag_v" --------- */

   else
   if (vex_streq(function_name, "armg_calculate_flag_v")) {

      /* specialise calls to the "armg_calculate_flag_v" function.
         Note that the returned value must be either 0 or 1; nonzero
         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
         values (from the thunk) are assumed to have bits 31:1
         clear. */
      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
      vassert(arity == 4);
      cc_op   = args[0]; /* ARMG_CC_OP_* */
      cc_dep1 = args[1];
      cc_dep2 = args[2];
      cc_ndep = args[3];

      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
         /* Thunk args are (result, shco, oldV) */
         /* V after LOGIC --> oldV */
         return cc_ndep;
      }

      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
         /* Thunk args are (argL, argR, unused) */
         /* V after SUB
            --> let res = argL - argR
                in ((argL ^ argR) & (argL ^ res)) >> 31
            --> ((argL ^ argR) & (argL ^ (argL - argR))) >> 31
         */
         IRExpr* argL = cc_dep1;
         IRExpr* argR = cc_dep2;
         return
            binop(Iop_Shr32,
                  binop(Iop_And32,
                        binop(Iop_Xor32, argL, argR),
                        binop(Iop_Xor32, argL, binop(Iop_Sub32, argL, argR))
                  ),
                  mkU8(31)
            );
      }

      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
         /* This happens occasionally in softfloat code, eg __divdf3+140 */
         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
         /* V after SBB
            --> let res = argL - argR - (oldC ^ 1)
                in  (argL ^ argR) & (argL ^ res) & 1
         */
         return
            binop(
               Iop_And32,
               binop(
                  Iop_And32,
                  // argL ^ argR
                  binop(Iop_Xor32, cc_dep1, cc_dep2),
                  // argL ^ (argL - argR - (oldC ^ 1))
                  binop(Iop_Xor32,
                        cc_dep1,
                        binop(Iop_Sub32,
                              binop(Iop_Sub32, cc_dep1, cc_dep2),
                              binop(Iop_Xor32, cc_ndep, mkU32(1)))
                  )
               ),
               mkU32(1)
            );
      }

   }

#  undef unop
#  undef binop
#  undef mkU32
#  undef mkU8

   return NULL;
}


/*----------------------------------------------*/
/*--- The exported fns ..                    ---*/
/*----------------------------------------------*/

/* VISIBLE TO LIBVEX CLIENT */
#if 0
void LibVEX_GuestARM_put_flags ( UInt flags_native,
                                 /*OUT*/VexGuestARMState* vex_state )
{
   vassert(0); // FIXME

   /* Mask out everything except N Z V C. */
   flags_native
      &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = flags_native;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
}
#endif

/* VISIBLE TO LIBVEX CLIENT */
UInt LibVEX_GuestARM_get_cpsr ( /*IN*/VexGuestARMState* vex_state )
{
   UInt cpsr = 0;
   // NZCV
   cpsr |= armg_calculate_flags_nzcv(
               vex_state->guest_CC_OP,
               vex_state->guest_CC_DEP1,
               vex_state->guest_CC_DEP2,
               vex_state->guest_CC_NDEP
            );
   vassert(0 == (cpsr & 0x0FFFFFFF));
   // Q
   if (vex_state->guest_QFLAG32 > 0)
      cpsr |= (1 << 27);
   // GE
   if (vex_state->guest_GEFLAG0 > 0)
      cpsr |= (1 << 16);
   if (vex_state->guest_GEFLAG1 > 0)
      cpsr |= (1 << 17);
   if (vex_state->guest_GEFLAG2 > 0)
      cpsr |= (1 << 18);
   if (vex_state->guest_GEFLAG3 > 0)
      cpsr |= (1 << 19);
   // M
   cpsr |= (1 << 4); // 0b10000 means user-mode
   // J,T   J (bit 24) is zero by initialisation above
   // T  we copy from R15T[0]
   if (vex_state->guest_R15T & 1)
      cpsr |= (1 << 5);
   // ITSTATE we punt on for the time being.  Could compute it
   // if needed though.
   // E, endianness, 0 (littleendian) from initialisation above
   // A,I,F disable some async exceptions.  Not sure about these.
   // Leave as zero for the time being.
   return cpsr;
}
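
/* For example, on a freshly initialised guest state (see
   LibVEX_GuestARM_initialise below: a COPY thunk with all-zero deps,
   Q/GE clear, R15T = 0) the function returns 0x00000010 -- just the
   user-mode M bits, with NZCV, Q, GE and T all clear. */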

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
{
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;

   vex_state->guest_R0  = 0;
   vex_state->guest_R1  = 0;
   vex_state->guest_R2  = 0;
   vex_state->guest_R3  = 0;
   vex_state->guest_R4  = 0;
   vex_state->guest_R5  = 0;
   vex_state->guest_R6  = 0;
   vex_state->guest_R7  = 0;
   vex_state->guest_R8  = 0;
   vex_state->guest_R9  = 0;
   vex_state->guest_R10 = 0;
   vex_state->guest_R11 = 0;
   vex_state->guest_R12 = 0;
   vex_state->guest_R13 = 0;
   vex_state->guest_R14 = 0;
   vex_state->guest_R15T = 0;  /* NB: implies ARM mode */

   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = 0;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;
   vex_state->guest_QFLAG32 = 0;
   vex_state->guest_GEFLAG0 = 0;
   vex_state->guest_GEFLAG1 = 0;
   vex_state->guest_GEFLAG2 = 0;
   vex_state->guest_GEFLAG3 = 0;

   vex_state->guest_EMWARN  = 0;
   vex_state->guest_TISTART = 0;
   vex_state->guest_TILEN   = 0;
   vex_state->guest_NRADDR  = 0;
   vex_state->guest_IP_AT_SYSCALL = 0;

   vex_state->guest_D0  = 0;
   vex_state->guest_D1  = 0;
   vex_state->guest_D2  = 0;
   vex_state->guest_D3  = 0;
   vex_state->guest_D4  = 0;
   vex_state->guest_D5  = 0;
   vex_state->guest_D6  = 0;
   vex_state->guest_D7  = 0;
   vex_state->guest_D8  = 0;
   vex_state->guest_D9  = 0;
   vex_state->guest_D10 = 0;
   vex_state->guest_D11 = 0;
   vex_state->guest_D12 = 0;
   vex_state->guest_D13 = 0;
   vex_state->guest_D14 = 0;
   vex_state->guest_D15 = 0;
   vex_state->guest_D16 = 0;
   vex_state->guest_D17 = 0;
   vex_state->guest_D18 = 0;
   vex_state->guest_D19 = 0;
   vex_state->guest_D20 = 0;
   vex_state->guest_D21 = 0;
   vex_state->guest_D22 = 0;
   vex_state->guest_D23 = 0;
   vex_state->guest_D24 = 0;
   vex_state->guest_D25 = 0;
   vex_state->guest_D26 = 0;
   vex_state->guest_D27 = 0;
   vex_state->guest_D28 = 0;
   vex_state->guest_D29 = 0;
   vex_state->guest_D30 = 0;
   vex_state->guest_D31 = 0;

   /* ARM encoded; zero is the default as it happens (result flags
      (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
      all exns masked, all exn sticky bits cleared). */
   vex_state->guest_FPSCR = 0;

   vex_state->guest_TPIDRURO = 0;

   /* Not in a Thumb IT block. */
   vex_state->guest_ITSTATE = 0;

   vex_state->padding1 = 0;
   vex_state->padding2 = 0;
   vex_state->padding3 = 0;
   vex_state->padding4 = 0;
   vex_state->padding5 = 0;
}


/*-----------------------------------------------------------*/
/*--- Describing the arm guest state, for the benefit     ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest R13(sp), R15T(pc).
*/
Bool guest_arm_state_requires_precise_mem_exns ( Int minoff,
                                                 Int maxoff)
{
   Int sp_min = offsetof(VexGuestARMState, guest_R13);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestARMState, guest_R15T);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of R11 in order to get proper
      stacktraces from non-optimised code. */
   Int r11_min = offsetof(VexGuestARMState, guest_R11);
   Int r11_max = r11_min + 4 - 1;

   if (maxoff < r11_min || minoff > r11_max) {
      /* no overlap with r11 */
   } else {
      return True;
   }

   /* Ditto R7, particularly needed for proper stacktraces in Thumb
      code. */
   Int r7_min = offsetof(VexGuestARMState, guest_R7);
   Int r7_max = r7_min + 4 - 1;

   if (maxoff < r7_min || minoff > r7_max) {
      /* no overlap with r7 */
   } else {
      return True;
   }

   return False;
}



#define ALWAYSDEFD(field)                           \
    { offsetof(VexGuestARMState, field),            \
      (sizeof ((VexGuestARMState*)0)->field) }

VexGuestLayout
   armGuest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestARMState),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestARMState,guest_R13),
          .sizeof_SP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestARMState,guest_R15T),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 10,

          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
             have to be tracked.  See detailed comment in gdefs.h on
             meaning of thunk fields. */
          .alwaysDefd
             = { /* 0 */ ALWAYSDEFD(guest_R15T),
                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
                 /* 3 */ ALWAYSDEFD(guest_EMWARN),
                 /* 4 */ ALWAYSDEFD(guest_TISTART),
                 /* 5 */ ALWAYSDEFD(guest_TILEN),
                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
                 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
                 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
               }
        };


/*---------------------------------------------------------------*/
/*--- end                                 guest_arm_helpers.c ---*/
/*---------------------------------------------------------------*/