
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */



    .text
    .align  2

#if defined(WITH_JIT)
#if defined(WITH_SELF_VERIFICATION)
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r2,#kSVSPunt                 @ r2<- interpreter entry point
    mov    r3, #0
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r2,#kSVSSingleStep           @ r2<- interpreter entry point
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoProfile            @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSTraceSelect          @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSBackwardBranch       @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    ldr    r0,[lr, #-1]                 @ pass our target PC
    mov    r2,#kSVSNormal               @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return

    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC                       @ pass our target PC
    mov    r2,#kSVSNoChain              @ r2<- interpreter entry point
    mov    r3, #0                       @ 0 means !inJitCodeCache
    str    r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    b      jitSVShadowRunEnd            @ doesn't return
#else
/*
 * Return from the translation cache to the interpreter when the compiler is
 * having issues translating/executing a Dalvik instruction. We have to skip
 * the code cache lookup, otherwise it is possible to bounce indefinitely
 * between the interpreter and the code cache if the instruction that fails
 * to be compiled happens to be at a trace start.
 */
    .global dvmJitToInterpPunt
dvmJitToInterpPunt:
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    rPC, r0
#if defined(WITH_JIT_TUNING)
    mov    r0,lr
    bl     dvmBumpPunt
#endif
    EXPORT_PC()
    mov    r0, #0
    str    r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return to the interpreter to handle a single instruction.
 * On entry:
 *    r0 <= PC
 *    r1 <= PC of resume instruction
 *    lr <= resume point in translation
 */
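/*
 * Rough C equivalent of the handler below (a sketch; field names follow the
 * offGlue_* offsets used here and are not a verified struct layout):
 *
 *     glue->jitResumeNPC = lr;                // native resume point
 *     glue->jitResumeDPC = r1;                // Dalvik resume PC
 *     glue->entryPoint   = kInterpEntryInstr;
 *     rPC = r0;  EXPORT_PC();
 *     glue->jitState = kJitSingleStep;        // single-step, then revert
 *     dvmMterpStdBail(glue, true);            // changeInterp = true
 */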
    .global dvmJitToInterpSingleStep
dvmJitToInterpSingleStep:
    str    lr,[rGLUE,#offGlue_jitResumeNPC]
    str    r1,[rGLUE,#offGlue_jitResumeDPC]
    mov    r1,#kInterpEntryInstr
    @ enum is 4 bytes in aapcs-EABI
    str    r1, [rGLUE, #offGlue_entryPoint]
    mov    rPC,r0
    EXPORT_PC()

    adrl   rIBASE, dvmAsmInstructionStart
    mov    r2,#kJitSingleStep     @ Ask for single step and then revert
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r1,#1                  @ set changeInterp to bail to debug interp
    b      common_gotoBail

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used for callees.
 */
    .global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ !0 means translation exists
    bxne   r0                       @ continue native execution if so
    b      2f                       @ branch over to use the interpreter

/*
 * Return from the translation cache and immediately request
 * a translation for the exit target.  Commonly used following
 * invokes.
 */
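/*
 * Control flow of the handler below, as a C-like sketch (function shapes are
 * taken from the comments in this file, not from the headers):
 *
 *     pc   = *(u4*)(lr - 1);                  // target rPC follows the BLX
 *     addr = dvmJitGetCodeAddr(pc);
 *     self->inJitCodeCache = addr;
 *     if (addr != NULL && dvmJitChain(addr, lr - 9) != NULL)
 *         jump to the (possibly chained) translation;
 *     else
 *         fall back to the interpreter, optionally requesting a trace (2:)
 */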
    .global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
    ldr    rPC,[lr, #-1]           @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5            @ save start of chain branch
    add    rINST, #-4              @  .. which is 9 bytes back
    mov    r0,rPC
    bl     dvmJitGetCodeAddr       @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    2f
    mov    r1,rINST
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @ in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ successful chain?
    bxne   r0                       @ continue native execution
    b      toInterpreter            @ didn't chain - resume with interpreter

/* No translation, so request one if profiling isn't disabled */
2:
    adrl   rIBASE, dvmAsmInstructionStart
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    cmp    r0, #0
    movne  r2,#kJitTSelectRequestHot   @ ask for trace selection
    bne    common_selectTrace
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)

/*
 * Return from the translation cache to the interpreter.
 * The return was done with a BLX from thumb mode, and
 * the following 32-bit word contains the target rPC value.
 * Note that lr (r14) will have its low-order bit set to denote
 * its thumb-mode origin.
 *
 * We'll need to stash our lr origin away, recover the new
 * target and then check to see if there is a translation available
 * for our new target.  If so, we do a translation chain and
 * go back to native execution.  Otherwise, it's back to the
 * interpreter (after treating this entry as a potential
 * trace start).
 */
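/*
 * Address picture assumed by the loads below, per the description above
 * (a sketch):
 *
 *     lr - 9 : start of the chain branch (the chainAddr handed to dvmJitChain)
 *     lr - 1 : 32-bit word holding the target Dalvik PC -- lr has its Thumb
 *              bit set, so this is really the word at the return address
 */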
    .global dvmJitToInterpNormal
dvmJitToInterpNormal:
    ldr    rPC,[lr, #-1]           @ get our target PC
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    add    rINST,lr,#-5            @ save start of chain branch
    add    rINST,#-4               @ .. which is 9 bytes back
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNormal
#endif
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    cmp    r0,#0
    beq    toInterpreter            @ go if not, otherwise do chain
    mov    r1,rINST
    bl     dvmJitChain              @ r0<- dvmJitChain(codeAddr,chainAddr)
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0                    @ successful chain?
    bxne   r0                       @ continue native execution
    b      toInterpreter            @ didn't chain - resume with interpreter

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChainNoProfile
dvmJitToInterpNoChainNoProfile:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                       @ continue native execution if so
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Return from the translation cache to the interpreter to do method invocation.
 * Check if translation exists for the callee, but don't chain to it.
 */
    .global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
#if defined(WITH_JIT_TUNING)
    bl     dvmBumpNoChain
#endif
    ldr    r10, [rGLUE, #offGlue_self]  @ callee saved r10 <- glue->self
    mov    r0,rPC
    bl     dvmJitGetCodeAddr        @ Is there a translation?
    str    r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov    r1, rPC                  @ arg1 of translation may need this
    mov    lr, #0                   @  in case target is HANDLER_INTERPRET
    cmp    r0,#0
    bxne   r0                       @ continue native execution if so
#endif

/*
 * No translation, restore interpreter regs and start interpreting.
 * rGLUE & rFP were preserved in the translated code, and rPC has
 * already been restored by the time we get here.  We'll need to set
 * up rIBASE & rINST, and load the address of the JitTable into r0.
 */
toInterpreter:
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_JIT_PROF_TABLE(r0)
    @ NOTE: intended fallthrough

/*
 * Common code to update the potential trace start counter, and initiate
 * a trace-build if appropriate.  On entry, rPC should point to the
 * next instruction to execute, rINST should already be loaded with
 * the next opcode word, and r0 should hold a pointer to the jit profile
 * table (pJitProfTable).
 */
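/*
 * C sketch of the counter logic below (assuming pJitProfTable is a byte
 * array of 1 << JIT_PROF_SIZE_LOG_2 entries; variable names illustrative):
 *
 *     u4 idx = (rPC ^ (rPC >> 12)) & ((1 << JIT_PROF_SIZE_LOG_2) - 1);
 *     if (--pJitProfTable[idx] != 0)
 *         dispatch the next opcode;           // still below the threshold
 *     else {
 *         pJitProfTable[idx] = jitThreshold;  // reset the counter
 *         look up / request a translation for rPC (code below);
 *     }
 */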
common_testUpdateProfile:
    cmp     r0,#0
    GET_INST_OPCODE(ip)
    GOTO_OPCODE_IFEQ(ip)       @ if not profiling, dispatch; otherwise fall through

common_updateProfile:
    eor     r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
    lsl     r3,r3,#(32 - JIT_PROF_SIZE_LOG_2)          @ shift out excess bits
    ldrb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
    GET_INST_OPCODE(ip)
    subs    r1,r1,#1           @ decrement counter
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
    GOTO_OPCODE_IFNE(ip)       @ if not at threshold, dispatch; otherwise fall through

/*
 * Here, we switch to the debug interpreter to request
 * trace selection.  First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
    GET_JIT_THRESHOLD(r1)
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    strb    r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
    EXPORT_PC()
    mov     r0,rPC
    bl      dvmJitGetCodeAddr           @ r0<- dvmJitGetCodeAddr(rPC)
    str     r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
    mov     r1, rPC                     @ arg1 of translation may need this
    mov     lr, #0                      @  in case target is HANDLER_INTERPRET
    cmp     r0,#0
#if !defined(WITH_SELF_VERIFICATION)
    bxne    r0                          @ jump to the translation
    mov     r2,#kJitTSelectRequest      @ ask for trace selection
    @ fall-through to common_selectTrace
#else
    moveq   r2,#kJitTSelectRequest      @ ask for trace selection
    beq     common_selectTrace
    /*
     * At this point, we have a target translation.  However, if
     * that translation is actually the interpret-only pseudo-translation
     * we want to treat it the same as no translation.
     */
    mov     r10, r0                     @ save target
    bl      dvmCompilerGetInterpretTemplate
    cmp     r0, r10                     @ special case?
    bne     jitSVShadowRunStart         @ set up self verification shadow space
    @ Need to clear the inJitCodeCache flag
    ldr    r10, [rGLUE, #offGlue_self]  @ r10 <- glue->self
    mov    r3, #0                       @ 0 means not in the JIT code cache
    str    r3, [r10, #offThread_inJitCodeCache] @ back to the interp land
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
    /* no return */
#endif

/*
 * On entry:
 *  r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot
 */
common_selectTrace:
    str     r2,[rGLUE,#offGlue_jitState]
    mov     r2,#kInterpEntryInstr       @ normal entry reason
    str     r2,[rGLUE,#offGlue_entryPoint]
    mov     r1,#1                       @ set changeInterp
    b       common_gotoBail

#if defined(WITH_SELF_VERIFICATION)
/*
 * Save PC and registers to shadow memory for self verification mode
 * before jumping to native translation.
 * On entry:
 *    rPC, rFP, rGLUE: the values that they should contain
 *    r10: the address of the target translation.
 */
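/*
 * Rough C equivalent of the shadow-space switch below (a sketch; the
 * ShadowSpace field names are inferred from the offShadowSpace_* offsets):
 *
 *     ShadowSpace* ss = dvmSelfVerificationSaveState(rPC, rFP, rGLUE, target);
 *     rFP   = ss->shadowFP;          // run the translation on shadow state
 *     rGLUE = &ss->interpState;
 *     goto target;
 */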
jitSVShadowRunStart:
    mov     r0,rPC                      @ r0<- program counter
    mov     r1,rFP                      @ r1<- frame pointer
    mov     r2,rGLUE                    @ r2<- InterpState pointer
    mov     r3,r10                      @ r3<- target translation
    bl      dvmSelfVerificationSaveState @ save registers to shadow space
    ldr     rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
    add     rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space
    bx      r10                         @ jump to the translation

/*
 * Restore PC, registers, and interpState to original values
 * before jumping back to the interpreter.
 */
jitSVShadowRunEnd:
    mov    r1,rFP                        @ pass ending fp
    bl     dvmSelfVerificationRestoreState @ restore pc and fp values
    ldr    rPC,[r0,#offShadowSpace_startPC] @ restore PC
    ldr    rFP,[r0,#offShadowSpace_fp]   @ restore FP
    ldr    rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState
    ldr    r1,[r0,#offShadowSpace_svState] @ get self verification state
    cmp    r1,#0                         @ check for punt condition
    beq    1f
    mov    r2,#kJitSelfVerification      @ ask for self verification
    str    r2,[rGLUE,#offGlue_jitState]
    mov    r2,#kInterpEntryInstr         @ normal entry reason
    str    r2,[rGLUE,#offGlue_entryPoint]
    mov    r1,#1                         @ set changeInterp
    b      common_gotoBail

1:                                       @ exit to interpreter without check
    EXPORT_PC()
    adrl   rIBASE, dvmAsmInstructionStart
    FETCH_INST()
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#endif

#endif

/*
 * Common code when a backward branch is taken.
 *
 * TODO: we could avoid a branch by just setting r0 and falling through
 * into the common_periodicChecks code, and having a test on r0 at the
 * end determine if we should return to the caller or update & branch to
 * the next instr.
 *
 * On entry:
 *  r9 is PC adjustment *in bytes*
 */
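/*
 * C sketch of the path below (names illustrative; r9 is the byte adjustment
 * described above):
 *
 *     common_periodicChecks(kInterpEntryInstr);
 *     rPC = (u2*)((char*)rPC + r9);  rINST = *rPC;   // FETCH_ADVANCE_INST_RB
 *     if (WITH_JIT && pJitProfTable != NULL)
 *         goto common_updateProfile;                 // count this branch target
 *     dispatch rINST;
 */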
common_backwardBranch:
    mov     r0, #kInterpEntryInstr
    bl      common_periodicChecks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0,#0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)
    GOTO_OPCODE(ip)
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


/*
 * Need to see if the thread needs to be suspended or debugger/profiler
 * activity has begun.  If so, we suspend the thread or side-exit to
 * the debug interpreter as appropriate.
 *
 * The common case is no activity on any of these, so we want to figure
 * that out quickly.  If something is up, we can then sort out what.
 *
 * We want to be fast if the VM was built without debugger or profiler
 * support, but we also need to recognize that the system is usually
 * shipped with both of these enabled.
 *
 * TODO: reduce this so we're just checking a single location.
 *
 * On entry:
 *  r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling)
 *  r9 is trampoline PC adjustment *in bytes*
 */
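/*
 * Fast path below, as a C sketch (field names follow the offGlue_* offsets;
 * the glue layout itself is defined elsewhere):
 *
 *     int pending = *glue->pSelfSuspendCount
 *                 | (glue->pDebuggerActive ? *glue->pDebuggerActive : 0)
 *                 | *glue->pActiveProfilers;
 *     if (pending == 0)
 *         return;                             // common case: nothing to do
 *     if (*glue->pSelfSuspendCount != 0)
 *         dvmCheckSuspendPending(glue->self); // may block while suspended
 *     if (debugger or profiler is active)
 *         bail to the debug interpreter (common_gotoBail)
 */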
common_periodicChecks:
    ldr     r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount

    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers

    ldr     ip, [r3]                    @ ip<- suspendCount (int)

    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)
    orrne   ip, ip, r1                  @ ip<- suspendCount | debuggerActive
    orrs    ip, ip, r2                  @ ip<- suspend|debugger|profiler; set Z

    bxeq    lr                          @ all zero, return

    /*
     * One or more interesting events have happened.  Figure out what.
     *
     * If debugging or profiling are compiled in, we need to disambiguate.
     *
     * r0 still holds the reentry type.
     */
    ldr     ip, [r3]                    @ ip<- suspendCount (int)
    cmp     ip, #0                      @ want suspend?
    beq     1f                          @ no, must be debugger/profiler

    stmfd   sp!, {r0, lr}               @ preserve r0 and lr
#if defined(WITH_JIT)
    /*
     * Refresh the Jit's cached copy of profile table pointer.  This pointer
     * doubles as the Jit's on/off switch.
     */
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r3, [r3] @ r3 <- pJitProfTable
    EXPORT_PC()                         @ need for precise GC
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch
#else
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ need for precise GC
#endif
    bl      dvmCheckSuspendPending      @ do full check, suspend if necessary
    ldmfd   sp!, {r0, lr}               @ restore r0 and lr

    /*
     * Reload the debugger/profiler enable flags.  We're checking to see
     * if either of these got set while we were suspended.
     *
     * We can't really avoid the #ifdefs here, because the fields don't
     * exist when the feature is disabled.
     */
    ldr     r1, [rGLUE, #offGlue_pDebuggerActive]   @ r1<- &debuggerActive
    cmp     r1, #0                      @ debugger enabled?
    ldrneb  r1, [r1]                    @ yes, r1<- debuggerActive (boolean)
    ldr     r2, [rGLUE, #offGlue_pActiveProfilers]  @ r2<- &activeProfilers
    ldr     r2, [r2]                    @ r2<- activeProfilers (int)

    orrs    r1, r1, r2
    beq     2f

1:  @ debugger/profiler enabled, bail out; glue->entryPoint was set above
    str     r0, [rGLUE, #offGlue_entryPoint]    @ store r0, need for debug/prof
    add     rPC, rPC, r9                @ update rPC
    mov     r1, #1                      @ "want switch" = true
    b       common_gotoBail             @ side exit

2:
    bx      lr                          @ nothing to do, return


/*
 * The equivalent of "goto bail", this calls through the "bail handler".
 *
 * State registers will be saved to the "glue" area before bailing.
 *
 * On entry:
 *  r1 is "bool changeInterp", indicating if we want to switch to the
 *     other interpreter or just bail all the way out
 */
common_gotoBail:
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r0, rGLUE                   @ r0<- glue ptr
    b       dvmMterpStdBail             @ call(glue, changeInterp)

    @add     r1, r1, #1                  @ using (boolean+1)
    @add     r0, rGLUE, #offGlue_jmpBuf  @ r0<- &glue->jmpBuf
    @bl      _longjmp                    @ does not return
    @bl      common_abort


/*
 * Common code for method invocation with range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
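/*
 * Argument copy performed below, as a sketch (AA and CCCC are the fields of
 * the invoke/range instruction decoded in the code):
 *
 *     u4* outs = (u4*)SAVEAREA_FROM_FP(fp) - AA;   // "outs" area of this frame
 *     for (int i = 0; i < AA; i++)
 *         outs[i] = fp[CCCC + i];                  // copy vCCCC .. v(CCCC+AA-1)
 */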
common_invokeMethodRange:
.LinvokeNewRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #8           @ r2<- AA (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    beq     .LinvokeArgsDone            @ if no args, skip the rest
    FETCH(r1, 2)                        @ r1<- CCCC

    @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
    @ (very few methods have > 10 args; could unroll for common cases)
    add     r3, rFP, r1, lsl #2         @ r3<- &fp[CCCC]
    sub     r10, r10, r2, lsl #2        @ r10<- "outs" area, for call args
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
1:  ldr     r1, [r3], #4                @ val = *fp++
    subs    r2, r2, #1                  @ count--
    str     r1, [r10], #4               @ *outs++ = val
    bne     1b                          @ ...while count != 0
    ldrh    r3, [r0, #offMethod_outsSize]   @ r3<- methodToCall->outsSize
    b       .LinvokeArgsDone

/*
 * Common code for method invocation without range.
 *
 * On entry:
 *  r0 is "Method* methodToCall", the method we're trying to call
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
    @ prepare to copy args to "outs" area of current frame
    movs    r2, rINST, lsr #12          @ r2<- B (arg count) -- test for zero
    SAVEAREA_FROM_FP(r10, rFP)          @ r10<- stack save area
    FETCH(r1, 2)                        @ r1<- GFED (load here to hide latency)
    ldrh    r9, [r0, #offMethod_registersSize]  @ r9<- methodToCall->regsSize
    ldrh    r3, [r0, #offMethod_outsSize]  @ r3<- methodToCall->outsSize
    beq     .LinvokeArgsDone

    @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
.LinvokeNonRange:
    rsb     r2, r2, #5                  @ r2<- 5-r2
    add     pc, pc, r2, lsl #4          @ computed goto, 4 instrs each
    bl      common_abort                @ (skipped due to ARM prefetch)
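    /*
     * Sketch of the computed goto above: reading pc in ARM state yields the
     * address of the current instruction + 8, i.e. the address of label "5:"
     * below, so pc + (5 - count) * 16 lands on the first copy block that is
     * actually needed (each numbered block is 4 instructions = 16 bytes).
     * The "bl common_abort" is never reached; it only pads the layout so the
     * arithmetic above works out.
     */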
5:  and     ip, rINST, #0x0f00          @ isolate A
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vA (shift right 8, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vA
4:  and     ip, r1, #0xf000             @ isolate G
    ldr     r2, [rFP, ip, lsr #10]      @ r2<- vG (shift right 12, left 2)
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vG
3:  and     ip, r1, #0x0f00             @ isolate F
    ldr     r2, [rFP, ip, lsr #6]       @ r2<- vF
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vF
2:  and     ip, r1, #0x00f0             @ isolate E
    ldr     r2, [rFP, ip, lsr #2]       @ r2<- vE
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vE
1:  and     ip, r1, #0x000f             @ isolate D
    ldr     r2, [rFP, ip, lsl #2]       @ r2<- vD
    mov     r0, r0                      @ nop
    str     r2, [r10, #-4]!             @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

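/*
 * Frame layout computed in .LinvokeArgsDone, as a sketch (the stack grows
 * down; sizes are in 32-bit words):
 *
 *     oldSaveArea = SAVEAREA_FROM_FP(fp);
 *     newFp       = (u4*)oldSaveArea - methodToCall->registersSize;
 *     newSaveArea = SAVEAREA_FROM_FP(newFp);
 *     bottom      = (u4*)newSaveArea - methodToCall->outsSize;
 *     if (bottom < glue->interpStackEnd)
 *         goto .LstackOverflow;
 */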
.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]  @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]  @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- stack save area
    sub     r1, r1, r9, lsl #2          @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)           @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2         @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                      @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow             @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)           @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE
    bne     .LinvokeNative

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0,#0
    bne     common_updateProfile
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @ newFp->localRefCookie = top
    mov     r9, r3                      @ r9<- glue->self (preserve)

    mov     r2, r0                      @ r2<- methodToCall
    mov     r0, r1                      @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval  @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception] @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                    @ r3 <- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                      @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown      @ no, handle exception

    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                      @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


    /*
     * Common code for method invocation, calling through "glue code".
     *
     * TODO: now that we have range and non-range invoke handlers, this
     *       needs to be split into two.  Maybe just create entry points
     *       that set r9 and jump here?
     *
     * On entry:
     *  r0 is "Method* methodToCall", the method we're trying to call
     *  r9 is "bool methodCallRange", indicating if this is a /range variant
     */
     .if    0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * This does not return.
 */
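/*
 * Frame pop performed below, as a C sketch (field names follow the
 * offStackSaveArea_* offsets used in the code):
 *
 *     StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
 *     fp = saveArea->prevFrame;
 *     pc = saveArea->savedPc;
 *     Method* m = SAVEAREA_FROM_FP(fp)->method;
 *     if (m == NULL)                    // break frame: leave the interpreter
 *         common_gotoBail(false);
 *     glue->method = m;  glue->methodClassDex = m->clazz->pDvmDex;
 */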
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method] @ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex]   @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame]  @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    str     r10, [r3, #offThread_inJitCodeCache]  @ may return to JIT'ed land
    cmp     r10, #0                      @ caller is compiled code
    blxne   r10
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /*
     * Return handling, calls through "glue code".
     */
     .if    0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
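/*
 * Core of the handler below, as a C sketch (argument order is taken from
 * the register setup and the comments in this routine):
 *
 *     Object* exc = self->exception;
 *     dvmAddTrackedAlloc(exc, self);           // keep it alive across the call
 *     self->exception = NULL;
 *     int relPc = (rPC - method->insns) >> 1;  // offset in code units
 *     int catchRelPc = dvmFindCatchBlock(self, relPc, exc, false, &fp);
 *     if (catchRelPc < 0)
 *         goto .LnotCaughtLocally;             // rethrow and bail out
 *     rPC = newMethod->insns + catchRelPc;     // resume in the handler
 */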
     .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks

    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]      @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]      @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1             @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


    /*
     * Exception handling, calls through "glue code".
     */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
 */
common_errDivideByZero:
    EXPORT_PC()
    ldr     r0, strArithmeticException
    ldr     r1, strDivideByZero
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Attempt to allocate an array with a negative size.
 */
common_errNegativeArraySize:
    EXPORT_PC()
    ldr     r0, strNegativeArraySizeException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invocation of a non-existent method.
 */
common_errNoSuchMethod:
    EXPORT_PC()
    ldr     r0, strNoSuchMethodError
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * We encountered a null object when we weren't expecting one.  We
 * export the PC, throw a NullPointerException, and goto the exception
 * processing code.
 */
common_errNullObject:
    EXPORT_PC()
    ldr     r0, strNullPointerException
    mov     r1, #0
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * For debugging, cause an immediate fault.  The source address will
 * be in lr (use a bl instruction to jump here).
 */
common_abort:
    ldr     pc, .LdeadFood
.LdeadFood:
    .word   0xdeadf00d

/*
 * Spit out a "we were here", preserving all registers.  (The attempt
 * to save ip won't work, but we need to save an even number of
 * registers for EABI 64-bit stack alignment.)
 */
    .macro  SQUEAK num
common_squeak\num:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strSqueak
    mov     r1, #\num
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endm

    SQUEAK  0
    SQUEAK  1
    SQUEAK  2
    SQUEAK  3
    SQUEAK  4
    SQUEAK  5

/*
 * Spit out the number in r0, preserving registers.
 */
common_printNum:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strSqueak
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print a newline, preserving registers.
 */
common_printNewline:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    ldr     r0, strNewline
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

    /*
     * Print the 32-bit quantity in r0 as a hex value, preserving registers.
     */
common_printHex:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r1, r0
    ldr     r0, strPrintHex
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print the 64-bit quantity in r0-r1, preserving registers.
 */
common_printLong:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    mov     r3, r1
    mov     r2, r0
    ldr     r0, strPrintLong
    bl      printf
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Print full method info.  Pass the Method* in r0.  Preserves regs.
 */
common_printMethod:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpPrintMethod
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr

/*
 * Call a C helper function that dumps regs and possibly some
 * additional info.  Requires the C function to be compiled in.
 */
    .if     0
common_dumpRegs:
    stmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bl      dvmMterpDumpArmRegs
    ldmfd   sp!, {r0, r1, r2, r3, ip, lr}
    bx      lr
    .endif

#if 0
/*
 * Experiment on VFP mode.
 *
 * uint32_t setFPSCR(uint32_t val, uint32_t mask)
 *
 * Updates the bits specified by "mask", setting them to the values in "val".
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
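/*
 * The two approaches described above, side by side (illustrative sketch;
 * this file uses the first form):
 *
 *     ldr     r0, strLogTag       @ indirect: load the address from a .word
 *     adr     r0, .LstrLogTag     @ direct: PC-relative, but limited range and
 *                                 @ awkward with mergeable string sections
 */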
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"