/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
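/*
 * Each handler below follows the same shape; roughly, as a C-like sketch
 * (names mirror the helpers used here):
 *
 *     EXPORT_PC();                       // expose the dex PC to the runtime
 *     if (MTERP_LOGGING) MterpLog...Exception(self, shadow_frame);
 *     goto MterpCommonFallback;          // returns 0: retry in reference interpreter
 */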

    .extern MterpLogDivideByZeroException
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogDivideByZeroException
#endif
    b       MterpCommonFallback

    .extern MterpLogArrayIndexException
common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogArrayIndexException
#endif
    b       MterpCommonFallback

    .extern MterpLogNullObjectException
common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogNullObjectException
#endif
    b       MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
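/*
 * Roughly (a sketch; "pending_exception" is an illustrative name for the
 * field behind THREAD_EXCEPTION_OFFSET):
 *
 *     if (self->pending_exception == nullptr) goto MterpFallback;
 *     // else fall through into MterpException below
 */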
MterpPossibleException:
    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)      # a0 <- pending exception, if any
    beqzc   a0, MterpFallback                       # none pending: fall back to reference interpreter.
    /* intentional fallthrough - handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 */
    .extern MterpHandleException
    .extern MterpShouldSwitchInterpreters
MterpException:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpHandleException                    # (self, shadow_frame)
    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
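    /*
     * A catch handler was found.  Rebuild rPC from the shadow frame: dex
     * instructions are 16-bit code units, so in effect (a sketch)
     * rPC = code_item + insns_offset + 2 * dex_pc.
     */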
    ld      a0, OFF_FP_CODE_ITEM(rFP)               # a0 <- code item
    lwu     a1, OFF_FP_DEX_PC(rFP)                  # a1 <- dex_pc of the catch block
    REFRESH_IBASE
    daddu   rPC, a0, CODEITEM_INSNS_OFFSET
    dlsa    rPC, a1, rPC, 1                         # generate new dex_pc_ptr
    /* Do we need to switch interpreters? */
    jal     MterpShouldSwitchInterpreters
    bnezc   v0, MterpFallback
    /* resume execution at catch block */
    EXPORT_PC
    FETCH_INST
    GET_INST_OPCODE v0
    GOTO_OPCODE v0
    /* NOTE: no fallthrough */

/*
 * Common handling for branches with support for JIT profiling.
 * On entry:
 *    rINST          <= signed offset
 *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling active, do hotness countdown and report if we hit zero.
 *    If in OSR check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in OSR check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active JIT profiling,
 * but no full OSR check and no pending suspend request.
 * Next most common case is not-taken branch with no full OSR check.
 */
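/*
 * In C-like pseudocode, the taken-branch dispatch below is roughly (a
 * sketch; per the comment further down, JIT_CHECK_OSR compares as -1):
 *
 *     if (rINST > 0) goto forward_branch;            // forward: no hotness count
 *     if (rPROFILE == JIT_CHECK_OSR) goto osr_check; // -1: OSR-check mode
 *     if (rPROFILE <  JIT_CHECK_OSR) goto resume;    // below -1: profiling off
 *     if (--rPROFILE == 0) goto add_batch;           // countdown hit zero: report
 *     goto resume;                                   // .L_resume_backward_branch
 */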
MterpCommonTakenBranchNoFlags:
    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
/*
 * rPROFILE is never 0 here: positive values are an active countdown that we
 * decrement by 1, and negative values are special states.  JIT_CHECK_OSR (-1)
 * is the largest special value, so a single comparison with it separates all
 * three cases.
 */
    li      v0, JIT_CHECK_OSR
    beqc    rPROFILE, v0, .L_osr_check  # equal to -1: OSR-check mode
    bltc    rPROFILE, v0, .L_resume_backward_branch # below -1: profiling disabled
    dsubu   rPROFILE, 1
    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
.L_resume_backward_branch:
    lw      ra, THREAD_FLAGS_OFFSET(rSELF)          # ra <- thread flags
    REFRESH_IBASE
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST # isolate suspend/checkpoint bits
    bnezc   ra, .L_suspend_request_pending
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_suspend_request_pending:
    EXPORT_PC
    move    a0, rSELF
    jal     MterpSuspendCheck           # (self)
    bnezc   v0, MterpFallback           # nonzero: must switch interpreters
    REFRESH_IBASE                       # might have changed during suspend
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_no_count_backwards:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    bnec    rPROFILE, v0, .L_resume_backward_branch
.L_osr_check:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_backward_branch

.L_forward_branch:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    beqc    rPROFILE, v0, .L_check_osr_forward
.L_resume_forward_branch:
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_check_osr_forward:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_forward_branch

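/*
 * Flush the cached countdown and let the runtime absorb the batch; it hands
 * back a fresh countdown.  In effect (a sketch; the field name mirrors
 * SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET):
 *
 *     shadow_frame->hotness_countdown = rPROFILE;
 *     rPROFILE = MterpAddHotnessBatch(method, shadow_frame, self);
 */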
.L_add_batch:
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    ld      a0, OFF_FP_METHOD(rFP)
    move    a2, rSELF
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    rPROFILE, v0                # rPROFILE <- new hotness countdown
    b       .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when an OSR check request is
 * active on the not-taken path.  All Dalvik not-taken conditional branch
 * offsets are 2.
 */
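/*
 * Sketch of the call below: the fixed offset argument 2 is the width, in
 * 16-bit code units, of the conditional branch being skipped.
 */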
.L_check_not_taken_osr:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    li      a2, 2
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST                               # rINST contains offset
    jal     MterpLogOSR
#endif
    li      v0, 1                                   # Signal normal return
    b       MterpDone

/*
 * Bail out to reference interpreter.
 */
    .extern MterpLogFallback
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogFallback
#endif
MterpCommonFallback:
    li      v0, 0                                   # signal retry with reference interpreter.
    b       MterpDone
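/*
 * Return-value protocol, as used above and below: v0 == 0 asks the caller to
 * retry the method in the reference interpreter; v0 == 1 is a normal return.
 */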

/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and RA.  Here we restore SP, restore the registers, and then return
 * through RA.
 *
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    li      v0, 1                                   # signal return to caller.
    b       MterpDone
/*
 * The return value is expected in a0.  If the return type is narrower than
 * 64 bits, the 32 most significant bits of a0 must hold its zero- or
 * sign-extension, depending on the return type.
 */
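/*
 * In effect (a sketch): *result_register = a0; return 1.  The result
 * register pointer was stored in the frame when the interpreter was entered.
 */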
MterpReturn:
    ld      a2, OFF_FP_RESULT_REGISTER(rFP)         # a2 <- pointer to the result register
    sd      a0, 0(a2)                               # store the return value
    li      v0, 1                                   # signal return to caller.
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.

MterpProfileActive:
    move    rINST, v0                   # stash return value
    /* Report cached hotness counts */
    ld      a0, OFF_FP_METHOD(rFP)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rSELF
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    v0, rINST                   # restore return value

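/* Note: the .cfi_restore operands are MIPS register numbers (16-22 = s0-s6, 28 = gp, 31 = ra). */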
.L_pop_and_return:
    ld      s6, STACK_OFFSET_S6(sp)
    .cfi_restore 22
    ld      s5, STACK_OFFSET_S5(sp)
    .cfi_restore 21
    ld      s4, STACK_OFFSET_S4(sp)
    .cfi_restore 20
    ld      s3, STACK_OFFSET_S3(sp)
    .cfi_restore 19
    ld      s2, STACK_OFFSET_S2(sp)
    .cfi_restore 18
    ld      s1, STACK_OFFSET_S1(sp)
    .cfi_restore 17
    ld      s0, STACK_OFFSET_S0(sp)
    .cfi_restore 16

    ld      ra, STACK_OFFSET_RA(sp)
    .cfi_restore 31

    ld      t8, STACK_OFFSET_GP(sp)
    .cpreturn
    .cfi_restore 28

    .set    noreorder
    jr      ra
    daddu   sp, sp, STACK_SIZE          # deallocate the frame in the delay slot
    .cfi_adjust_cfa_offset -STACK_SIZE

    .cfi_endproc
    .set    reorder
.size ExecuteMterpImpl, .-ExecuteMterpImpl
    278