
/*---------------------------------------------------------------*/
/*--- begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2011 OpenWorks LLP
      info (at) open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emwarn.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_s390_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"

#include "host_generic_simd128.h"


/* This file contains the top level interface to the library. */
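/* The main entry points defined here are LibVEX_Init (one-time
   library initialisation) and LibVEX_Translate (translate one guest
   superblock into host code).  Also here are the default-settings
   helpers (LibVEX_default_VexControl, LibVEX_default_VexArchInfo,
   LibVEX_default_VexAbiInfo) and the small pretty-printing helpers
   (LibVEX_EmWarn_string, LibVEX_ppVexArch, LibVEX_ppVexHwCaps). */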

/* --------- fwds ... --------- */

static Bool   are_valid_hwcaps ( VexArch arch, UInt hwcaps );
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vcon->iropt_verbosity            = 0;
   vcon->iropt_level                = 2;
   vcon->iropt_precise_memory_exns  = False;
   vcon->iropt_unroll_thresh        = 120;
   vcon->guest_max_insns            = 60;
   vcon->guest_chase_thresh         = 10;
   vcon->guest_chase_cond           = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_valgrind_support   = valgrind_support;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}

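/* A minimal client-side start-up sketch (my_failure_exit and
   my_log_bytes stand for hypothetical client-supplied callbacks; they
   are not part of the library):

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      LibVEX_Init(my_failure_exit, my_log_bytes,
                  0,       -- debuglevel
                  False,   -- valgrind_support
                  &vcon);
*/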

/* --------- Make a translation. --------- */

/* Exported to library client. */

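/* The pipeline below is: front end (bb_to_IR, decoding guest bytes to
   IR), IR optimisation (do_iropt_BB), optional client instrumentation
   (instrument1/instrument2) followed by a cleanup pass, tree building
   (ado_treebuild_BB), instruction selection (iselSB), register
   allocation (doRegisterAllocation), and finally assembly via the
   per-arch emit function. */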
VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   HReg* available_real_regs;
   Int   n_available_real_regs;
   Bool         (*isMove)       ( HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*,
                                                  VexAbiInfo* );
   Int          (*emit)         ( UChar*, Int, HInstr*, Bool, void*, void* );
   IRExpr*      (*specHelper)   ( HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int );

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   Bool            host_is_bigendian = False;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_TISTART, offB_TILEN;
   UChar           insn_bytes[48];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64;

   guest_layout           = NULL;
   available_real_regs    = NULL;
   n_available_real_regs  = 0;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_TISTART           = 0;
   offB_TILEN             = 0;
   mode64                 = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vassert(vta->needs_self_check != NULL);

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

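   /* Note on the dispatch_* assertions in the cases below: hosts
      using a jump-to-dispatcher scheme (x86, amd64) must be supplied
      with both dispatcher entry points, whereas hosts using a
      return-to-dispatcher scheme (ppc32, ppc64, s390x, arm) must have
      both left as NULL. */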
   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         getAllocableRegs_X86 ( &n_available_real_regs,
                                &available_real_regs );
         isMove       = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
         getRegUsage  = (void(*)(HRegUsage*,HInstr*, Bool))
                        getRegUsage_X86Instr;
         mapRegs      = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
         genSpill     = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genSpill_X86;
         genReload    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genReload_X86;
         directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
         ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
         ppReg        = (void(*)(HReg)) ppHRegX86;
         iselSB       = iselSB_X86;
         emit         = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                        emit_X86Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
         /* jump-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted != NULL);
         vassert(vta->dispatch_assisted != NULL);
         break;

      case VexArchAMD64:
         mode64      = True;
         getAllocableRegs_AMD64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_AMD64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_AMD64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_AMD64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
         ppReg       = (void(*)(HReg)) ppHRegAMD64;
         iselSB      = iselSB_AMD64;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                       emit_AMD64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
         /* jump-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted != NULL);
         vassert(vta->dispatch_assisted != NULL);
         break;

      case VexArchPPC32:
         mode64      = False;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
         /* return-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted == NULL);
         vassert(vta->dispatch_assisted == NULL);
         break;

      case VexArchPPC64:
         mode64      = True;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
         /* return-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted == NULL);
         vassert(vta->dispatch_assisted == NULL);
         break;

      case VexArchS390X:
         mode64      = True;
         getAllocableRegs_S390 ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_S390Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_S390Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_S390Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_S390;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_S390;
         ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
         ppReg       = (void(*)(HReg)) ppHRegS390;
         iselSB      = iselSB_S390;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                       emit_S390Instr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
         /* return-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted == NULL);
         vassert(vta->dispatch_assisted == NULL);
         break;

      case VexArchARM:
         mode64      = False;
         getAllocableRegs_ARM ( &n_available_real_regs,
                                &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARMInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_ARMInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_ARMInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_ARM;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_ARM;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
         ppReg       = (void(*)(HReg)) ppHRegARM;
         iselSB      = iselSB_ARM;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*,void*))
                       emit_ARMInstr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
         /* return-to-dispatcher scheme */
         vassert(vta->dispatch_unassisted == NULL);
         vassert(vta->dispatch_assisted == NULL);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }


   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_X86;
         specHelper       = guest_x86_spechelper;
         guest_sizeB      = sizeof(VexGuestX86State);
         guest_word_type  = Ity_I32;
         guest_layout     = &x86guest_layout;
         offB_TISTART     = offsetof(VexGuestX86State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestX86State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestX86State) % 16);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_AMD64;
         specHelper       = guest_amd64_spechelper;
         guest_sizeB      = sizeof(VexGuestAMD64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &amd64guest_layout;
         offB_TISTART     = offsetof(VexGuestAMD64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestAMD64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestAMD64State) % 16);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TILEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc32_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC32State);
         guest_word_type  = Ity_I32;
         guest_layout     = &ppc32Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC32State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC32State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC32State) % 16);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TISTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc64_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &ppc64Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC64State) % 16);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn = guest_s390x_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_S390;
         specHelper       = guest_s390x_spechelper;
         guest_sizeB      = sizeof(VexGuestS390XState);
         guest_word_type  = Ity_I64;
         guest_layout     = &s390xGuest_layout;
         offB_TISTART     = offsetof(VexGuestS390XState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestS390XState,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestS390XState) % 16);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TISTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TILEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_ARM;
         specHelper       = guest_arm_spechelper;
         guest_sizeB      = sizeof(VexGuestARMState);
         guest_word_type  = Ity_I32;
         guest_layout     = &armGuest_layout;
         offB_TISTART     = offsetof(VexGuestARMState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestARMState,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARMState) % 16);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   /* Set up result struct. */
   VexTranslateResult res;
   res.status       = VexTransOK;
   res.n_sc_extents = 0;

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* This doesn't necessarily have to be true, but if it isn't, it
         means we are simulating one flavour of an architecture on a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   irsb = bb_to_IR ( vta->guest_extents,
                     &res.n_sc_extents,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     host_is_bigendian,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_TISTART,
                     offB_TILEN );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      res.status = VexTransAccessFail; return res;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         UChar* p = (UChar*)vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
                                           guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
                              vta->guest_bytes_addr,
                              vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   ado_treebuild_BB( irsb );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   vcode = iselSB ( irsb, vta->arch_host, &vta->archinfo_host,
                                          &vta->abiinfo_both );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

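   /* Each instruction is assembled into the on-stack insn_bytes
      buffer and then copied into the caller-supplied vta->host_bytes
      area; if that area would overflow, translation is abandoned with
      VexTransOutputFull. */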
   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      if (vex_traceflags & VEX_TRACE_ASM) {
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      j = (*emit)( insn_bytes, sizeof insn_bytes, rcode->arr[i], mode64,
                   vta->dispatch_unassisted, vta->dispatch_assisted );
      if (vex_traceflags & VEX_TRACE_ASM) {
         for (k = 0; k < j; k++)
            if (insn_bytes[k] < 16)
               vex_printf("0%x ",  (UInt)insn_bytes[k]);
            else
               vex_printf("%x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (out_used + j > vta->host_bytes_size) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res.status = VexTransOutputFull;
         return res;
      }
      for (k = 0; k < j; k++) {
         vta->host_bytes[out_used] = insn_bytes[k];
         out_used++;
      }
      vassert(out_used <= vta->host_bytes_size);
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   vex_traceflags = 0;
   res.status = VexTransOK;
   return res;
}


/* --------- Emulation warnings. --------- */

HChar* LibVEX_EmWarn_string ( VexEmWarn ew )
{
   switch (ew) {
     case EmWarn_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     default:
        vpanic("LibVEX_EmWarn_string: unknown warning");
   }
}

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   HChar* str = show_hwcaps(arch,hwcaps);
   return str ? str : "INVALID";
}


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vai->hwcaps             = 0;
   vai->ppc_cache_line_szB = 0;
   vai->ppc_dcbz_szB       = 0;
   vai->ppc_dcbzl_szB      = 0;
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_zero  = False;
   vbi->guest_amd64_assume_gs_is_0x60  = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest_ppc_sc_continues_at_LR   = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
   vbi->host_ppc32_regalign_int64_args = False;
}


/* Return a string showing the hwcaps in a nice way.  The string will
   be NULL for invalid combinations of flags, so these functions also
   serve as a way to validate hwcaps values. */

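/* For example, a hwcaps value of VEX_HWCAPS_X86_SSE2 on its own (SSE2
   without SSE1) matches no case in show_hwcaps_x86 below, so it is
   reported as invalid; are_valid_hwcaps then turns that into a failed
   assertion in LibVEX_Translate. */
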
static HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   /* Monotonic, SSE3 > SSE2 > SSE1 > baseline. */
   switch (hwcaps) {
      case 0:
         return "x86-sse0";
      case VEX_HWCAPS_X86_SSE1:
         return "x86-sse1";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2:
         return "x86-sse1-sse2";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-lzcnt";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3:
         return "x86-sse1-sse2-sse3";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3 | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-sse3-lzcnt";
      default:
         return NULL;
   }
}

static HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   /* SSE3 and CX16 are orthogonal and > baseline, although we really
      don't expect to come across anything which can do SSE3 but can't
      do CX16.  Still, we can handle that case.  LZCNT is similarly
      orthogonal. */
   switch (hwcaps) {
      case 0:
         return "amd64-sse2";
      case VEX_HWCAPS_AMD64_SSE3:
         return "amd64-sse3";
      case VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse2-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse3-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-lzcnt";
      case VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse2-cx16-lzcnt";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-cx16-lzcnt";

      default:
         return NULL;
   }
}

static HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > F > baseline,
      but once you have F then you can have FX or GX too. */
   const UInt F  = VEX_HWCAPS_PPC32_F;
   const UInt V  = VEX_HWCAPS_PPC32_V;
   const UInt FX = VEX_HWCAPS_PPC32_FX;
   const UInt GX = VEX_HWCAPS_PPC32_GX;
   const UInt VX = VEX_HWCAPS_PPC32_VX;
         UInt c  = hwcaps;
   if (c == 0)           return "ppc32-int";
   if (c == F)           return "ppc32-int-flt";
   if (c == (F|FX))      return "ppc32-int-flt-FX";
   if (c == (F|GX))      return "ppc32-int-flt-GX";
   if (c == (F|FX|GX))   return "ppc32-int-flt-FX-GX";
   if (c == (F|V))       return "ppc32-int-flt-vmx";
   if (c == (F|V|FX))    return "ppc32-int-flt-vmx-FX";
   if (c == (F|V|GX))    return "ppc32-int-flt-vmx-GX";
   if (c == (F|V|FX|GX)) return "ppc32-int-flt-vmx-FX-GX";
   if (c == (F|V|FX|GX|VX)) return "ppc32-int-flt-vmx-FX-GX-VX";
   return NULL;
}

static HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > baseline(==F),
      but once you have F then you can have FX or GX too. */
   const UInt V  = VEX_HWCAPS_PPC64_V;
   const UInt FX = VEX_HWCAPS_PPC64_FX;
   const UInt GX = VEX_HWCAPS_PPC64_GX;
   const UInt VX = VEX_HWCAPS_PPC64_VX;
         UInt c  = hwcaps;
   if (c == 0)         return "ppc64-int-flt";
   if (c == FX)        return "ppc64-int-flt-FX";
   if (c == GX)        return "ppc64-int-flt-GX";
   if (c == (FX|GX))   return "ppc64-int-flt-FX-GX";
   if (c == V)         return "ppc64-int-flt-vmx";
   if (c == (V|FX))    return "ppc64-int-flt-vmx-FX";
   if (c == (V|GX))    return "ppc64-int-flt-vmx-GX";
   if (c == (V|FX|GX)) return "ppc64-int-flt-vmx-FX-GX";
   if (c == (V|FX|GX|VX)) return "ppc64-int-flt-vmx-FX-GX-VX";
   return NULL;
}

static HChar* show_hwcaps_arm ( UInt hwcaps )
{
   Bool N = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
   Bool vfp = ((hwcaps & (VEX_HWCAPS_ARM_VFP |
               VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3)) != 0);
   switch (VEX_ARM_ARCHLEVEL(hwcaps)) {
      case 5:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv5-vfp";
         else
            return "ARMv5";
         return NULL;
      case 6:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv6-vfp";
         else
            return "ARMv6";
         return NULL;
      case 7:
         if (vfp) {
            if (N)
               return "ARMv7-vfp-neon";
            else
               return "ARMv7-vfp";
         } else {
            if (N)
               return "ARMv7-neon";
            else
               return "ARMv7";
         }
      default:
         return NULL;
   }
   return NULL;
}

static HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const HChar facilities[][6] = {
     { "ldisp" },
     { "eimm" },
     { "gie" },
     { "dfp" },
     { "fgx" },
   };
   static HChar buf[sizeof facilities + sizeof prefix + 1];
   static HChar *p;

   if (buf[0] != '\0') return buf;  /* already constructed */

   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   if (hwcaps & VEX_HWCAPS_S390X_LDISP)
     p = p + vex_sprintf(p, "-%s", facilities[0]);
   if (hwcaps & VEX_HWCAPS_S390X_EIMM)
     p = p + vex_sprintf(p, "-%s", facilities[1]);
   if (hwcaps & VEX_HWCAPS_S390X_GIE)
     p = p + vex_sprintf(p, "-%s", facilities[2]);
   if (hwcaps & VEX_HWCAPS_S390X_DFP)
     p = p + vex_sprintf(p, "-%s", facilities[3]);
   if (hwcaps & VEX_HWCAPS_S390X_FGX)
     p = p + vex_sprintf(p, "-%s", facilities[4]);

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
     vex_sprintf(p, "-%s", "zarch");

   return buf;
}

/* ---- */
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:   return show_hwcaps_x86(hwcaps);
      case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:   return show_hwcaps_arm(hwcaps);
      case VexArchS390X: return show_hwcaps_s390x(hwcaps);
      default: return NULL;
   }
}

static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch,hwcaps) != NULL;
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/