Home | History | Annotate | Download | only in m_dispatch
      1 
      2 /*--------------------------------------------------------------------*/
      3 /*--- The core dispatch loop, for jumping to a code address.       ---*/
      4 /*---                                         dispatch-arm-linux.S ---*/
      5 /*--------------------------------------------------------------------*/
      6 
      7 /*
      8   This file is part of Valgrind, a dynamic binary instrumentation
      9   framework.
     10 
     11   Copyright (C) 2008-2013 Evan Geller
     12      gaze (at) bea.ms
     13 
     14   This program is free software; you can redistribute it and/or
     15   modify it under the terms of the GNU General Public License as
     16   published by the Free Software Foundation; either version 2 of the
     17   License, or (at your option) any later version.
     18 
     19   This program is distributed in the hope that it will be useful, but
     20   WITHOUT ANY WARRANTY; without even the implied warranty of
     21   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     22   General Public License for more details.
     23 
     24   You should have received a copy of the GNU General Public License
     25   along with this program; if not, write to the Free Software
     26   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
     27   02111-1307, USA.
     28 
     29   The GNU General Public License is contained in the file COPYING.
     30 */
     31 
     32 #if defined(VGP_arm_linux)
     33 	.fpu vfp
     34 
     35 #include "pub_core_basics_asm.h"
     36 #include "pub_core_dispatch_asm.h"
     37 #include "pub_core_transtab_asm.h"
     38 #include "libvex_guest_offsets.h"	/* for OFFSET_arm_R* */
     39 
     40 
     41 /*------------------------------------------------------------*/
     42 /*---                                                      ---*/
     43 /*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
     44 /*--- used to run all translations,                        ---*/
     45 /*--- including no-redir ones.                             ---*/
     46 /*---                                                      ---*/
     47 /*------------------------------------------------------------*/
     48 
     49 /*----------------------------------------------------*/
     50 /*--- Entry and preamble (set everything up)       ---*/
     51 /*----------------------------------------------------*/
     52 
     53 /* signature:
     54 void VG_(disp_run_translations)( UWord* two_words,
     55                                  void*  guest_state,
     56                                  Addr   host_addr );
     57 */
     58 .text
        /* Entry point from C.  AAPCS: r0..r2 carry the three args.
           The preamble saves all integer state the caller expects
           preserved (r4-r11, lr), plus r0 itself, so that the
           postamble can recover the two_words pointer after the
           generated code has freely clobbered r0-r3/r12. */
     59 .global VG_(disp_run_translations)
     60 VG_(disp_run_translations):
     61         /* r0  holds two_words
     62            r1  holds guest_state
     63            r2  holds host_addr
     64         */
     65         /* The number of regs in this list needs to be even, in
     66            order to keep the stack 8-aligned. */
     67 	push {r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
     68 
        /* Generated code assumes FPSCR == 0 on entry; the postamble
           checks this invariant still holds on the way out. */
     69         /* set FPSCR to vex-required default value */
     70         mov  r4, #0
     71         fmxr fpscr, r4
     72 
        /* By convention, r8 holds the guest state pointer for the
           whole time we are inside the code cache (see the
           OFFSET_arm_R15T load in VG_(disp_cp_xindir) below). */
     73        	/* Set up the guest state pointer */
     74         mov r8, r1
     75 
     76         /* and jump into the code cache.  Chained translations in
     77            the code cache run, until for whatever reason, they can't
     78            continue.  When that happens, the translation in question
     79            will jump (or call) to one of the continuation points
     80            VG_(cp_...) below. */
     81         bx r2
     82         /* NOTREACHED */
     83 
     84 /*----------------------------------------------------*/
     85 /*--- Postamble and exit.                          ---*/
     86 /*----------------------------------------------------*/
     87 
        /* Common exit path: every continuation point funnels here
           with (r1, r2) = the two result words for the caller. */
     88 postamble:
     89         /* At this point, r1 and r2 contain two
     90            words to be returned to the caller.  r1
     91            holds a TRC value, and r2 optionally may
     92            hold another word (for CHAIN_ME exits, the
     93            address of the place to patch.) */
     94 
     95         /* We're leaving.  Check that nobody messed with
     96            FPSCR in ways we don't expect. */
        /* The preamble set FPSCR to zero.  Ignore the sticky
           status bits (condition flags and cumulative exception
           flags), which translations may legitimately set; any
           other bit (rounding mode, trap enables, vector fields)
           being nonzero means the invariant was violated. */
     97         fmrx r4, fpscr
     98         bic  r4, #0xF8000000 /* mask out NZCV and QC */
     99         bic  r4, #0x0000009F /* mask out IDC,IXC,UFC,OFC,DZC,IOC */
    100         cmp  r4, #0
    101         beq  remove_frame /* we're OK */
    102         /* otherwise we have an invariant violation */
        /* Overwrite the intended result with the failure TRC. */
    103         movw r1, #VG_TRC_INVARIANT_FAILED
    104         movw r2, #0
    105         /* fall through */
    106 
    107 remove_frame:
        /* Pop exactly what the preamble pushed; r0 gets back the
           two_words pointer saved on entry. */
    108         /* Restore int regs, including importantly r0 (two_words) */
    109 	pop {r0, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
        /* two_words[0] = TRC, two_words[1] = optional extra word. */
    110         /* Stash return values */
    111         str  r1, [r0, #0]
    112         str  r2, [r0, #4]
    113         bx   lr
    114 
    115 /*----------------------------------------------------*/
    116 /*--- Continuation points                          ---*/
    117 /*----------------------------------------------------*/
    118 
    119 /* ------ Chain me to slow entry point ------ */
    120 .global VG_(disp_cp_chain_me_to_slowEP)
    121 VG_(disp_cp_chain_me_to_slowEP):
    122         /* We got called.  The return address indicates
    123            where the patching needs to happen.  Collect
    124            the return address and, exit back to C land,
    125            handing the caller the pair (Chain_me_S, RA) */
    126         mov  r1, #VG_TRC_CHAIN_ME_TO_SLOW_EP
    127         mov  r2, lr
        /* lr points just past the 12-byte movw/movt/blx sequence
           that the translation used to call us; back up over it so
           r2 is the address of the first instruction to patch. */
    128         /* 4 = movw r12, lo16(disp_cp_chain_me_to_slowEP)
    129            4 = movt r12, hi16(disp_cp_chain_me_to_slowEP)
    130            4 = blx  r12 */
    131         sub  r2, r2, #4+4+4
    132         b    postamble
    133 
    134 /* ------ Chain me to fast entry point ------ */
        /* Identical mechanism to the slowEP case above, but reports
           VG_TRC_CHAIN_ME_TO_FAST_EP so C land patches a jump to
           the target's fast entry point instead. */
    135 .global VG_(disp_cp_chain_me_to_fastEP)
    136 VG_(disp_cp_chain_me_to_fastEP):
    137         /* We got called.  The return address indicates
    138            where the patching needs to happen.  Collect
    139            the return address and, exit back to C land,
    140            handing the caller the pair (Chain_me_F, RA) */
    141         mov  r1, #VG_TRC_CHAIN_ME_TO_FAST_EP
    142         mov  r2, lr
        /* Back up over the 12-byte movw/movt/blx call sequence so
           r2 is the address of the first instruction to patch. */
    143         /* 4 = movw r12, lo16(disp_cp_chain_me_to_fastEP)
    144            4 = movt r12, hi16(disp_cp_chain_me_to_fastEP)
    145            4 = blx  r12 */
    146         sub  r2, r2, #4+4+4
    147         b    postamble
    148 
    149 /* ------ Indirect but boring jump ------ */
        /* Indirect-branch continuation: look the next guest address
           up in the fast translation cache and, on a hit, jump
           straight to the cached host code without leaving the
           dispatcher. */
    150 .global VG_(disp_cp_xindir)
    151 VG_(disp_cp_xindir):
        /* r8 = guest state pointer (set up in the preamble);
           R15T is the guest program counter field. */
    152 	/* Where are we going? */
    153         ldr  r0, [r8, #OFFSET_arm_R15T]
    154 
    155         /* stats only */
    156         movw r1, #:lower16:vgPlain_stats__n_xindirs_32
    157         movt r1, #:upper16:vgPlain_stats__n_xindirs_32
    158         ldr  r2, [r1, #0]
    159         add  r2, r2, #1
    160         str  r2, [r1, #0]
    161 
        /* Index = (guest_addr >> 1) & VG_TT_FAST_MASK.  The LSR #1
           discards the low bit of R15T (presumably the Thumb bit —
           confirm against pub_core_transtab_asm.h).  Each cache
           entry is 8 bytes: a {guest, host} address pair, hence
           the LSL #3 scaling and the ldrd pair load.  The movw/movt
           of r4 are interleaved with the index computation, which
           looks like deliberate scheduling — keep the order. */
    162         /* try a fast lookup in the translation cache */
    163         // r0 = next guest, r1,r2,r3,r4 scratch
    164         movw r1, #VG_TT_FAST_MASK       // r1 = VG_TT_FAST_MASK
    165         movw r4, #:lower16:VG_(tt_fast)
    166 
    167 	and  r2, r1, r0, LSR #1         // r2 = entry #
    168         movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
    169 
    170 	add  r1, r4, r2, LSL #3         // r1 = &tt_fast[entry#]
    171 
    172         ldrd r4, r5, [r1, #0]           // r4 = .guest, r5 = .host
    173 
        /* Hit iff the cached guest address matches the one we want. */
    174 	cmp  r4, r0
    175 
    176         // jump to host if lookup succeeded
    177 	bxeq r5
    178 
    179         /* otherwise the fast lookup failed */
    180         /* RM ME -- stats only */
    181         movw r1, #:lower16:vgPlain_stats__n_xindir_misses_32
    182         movt r1, #:upper16:vgPlain_stats__n_xindir_misses_32
    183         ldr  r2, [r1, #0]
    184         add  r2, r2, #1
    185         str  r2, [r1, #0]
    186 
        /* Miss: return to C land for a full translation lookup. */
    187 	mov  r1, #VG_TRC_INNER_FASTMISS
    188         mov  r2, #0
    189 	b    postamble
    190 
    191 /* ------ Assisted jump ------ */
        /* Assisted exit: the translation has already placed the TRC
           value in r8 (temporarily reusing the guest-state-pointer
           register); just hand it to the postamble. */
    192 .global VG_(disp_cp_xassisted)
    193 VG_(disp_cp_xassisted):
    194         /* r8 contains the TRC */
    195         mov  r1, r8
    196         mov  r2, #0
    197         b    postamble
    198 
    199 /* ------ Event check failed ------ */
        /* Event-check counter reached zero: return to C land so the
           scheduler can regain control (thread switching, signals). */
    200 .global VG_(disp_cp_evcheck_fail)
    201 VG_(disp_cp_evcheck_fail):
    202        	mov  r1, #VG_TRC_INNER_COUNTERZERO
    203         mov  r2, #0
    204 	b    postamble
    205 
    206 
    207 .size VG_(disp_run_translations), .-VG_(disp_run_translations)
    208 
    209 /* Let the linker know we don't need an executable stack */
    210 .section .note.GNU-stack,"",%progbits
    211 
    212 #endif // defined(VGP_arm_linux)
    213 
    214 /*--------------------------------------------------------------------*/
    215 /*--- end                                     dispatch-arm-linux.S ---*/
    216 /*--------------------------------------------------------------------*/
    217