/* ART quick entrypoints for MIPS32. */
      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "asm_support_mips.S"
     18 
     19 #include "arch/quick_alloc_entrypoints.S"
     20 
     21     .set noreorder
     22     .balign 4
     23 
     24     /* Deliver the given exception */
     25     .extern artDeliverExceptionFromCode
     26     /* Deliver an exception pending on a thread */
     27     .extern artDeliverPendingExceptionFromCode
     28 
     29 #define ARG_SLOT_SIZE   32    // space for a0-a3 plus 4 more words
     30 
     31     /*
     32      * Macro that sets up the callee save frame to conform with
     33      * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
     34      * Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
     35      * Clobbers $t0 and $sp
     36      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     37      * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
     38      */
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    addiu  $sp, $sp, -96
    .cfi_adjust_cfa_offset 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 96)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
#endif

    # Spill callee-saved GPRs, highest offset first; each store is paired with a
    # .cfi_rel_offset so the unwinder can locate the saved register.
    sw     $ra, 92($sp)
    .cfi_rel_offset 31, 92
    sw     $s8, 88($sp)
    .cfi_rel_offset 30, 88
    sw     $gp, 84($sp)
    .cfi_rel_offset 28, 84
    sw     $s7, 80($sp)
    .cfi_rel_offset 23, 80
    sw     $s6, 76($sp)
    .cfi_rel_offset 22, 76
    sw     $s5, 72($sp)
    .cfi_rel_offset 21, 72
    sw     $s4, 68($sp)
    .cfi_rel_offset 20, 68
    sw     $s3, 64($sp)
    .cfi_rel_offset 19, 64
    sw     $s2, 60($sp)
    .cfi_rel_offset 18, 60
    sw     $s1, 56($sp)
    .cfi_rel_offset 17, 56
    sw     $s0, 52($sp)
    .cfi_rel_offset 16, 52

    # Spill callee-saved FP register pairs; SDu (from asm_support_mips.S) stores a
    # 64-bit FP value using $t1 as scratch — presumably to handle both FR=0/FR=1
    # FPU modes; confirm against the macro definition.
    SDu $f30, $f31, 44, $sp, $t1
    SDu $f28, $f29, 36, $sp, $t1
    SDu $f26, $f27, 28, $sp, $t1
    SDu $f24, $f25, 20, $sp, $t1
    SDu $f22, $f23, 12, $sp, $t1
    SDu $f20, $f21, 4,  $sp, $t1

    # 1 word for holding Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)  # $t0 = &Runtime::instance_ (via GOT)
    lw $t0, 0($t0)                                 # $t0 = Runtime::instance_
    lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t0)  # $t0 = SaveAllCalleeSaves runtime method
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
     88 
     89     /*
     90      * Macro that sets up the callee save frame to conform with
     91      * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes non-moving GC.
     92      * Does not include rSUSPEND or rSELF
     93      * callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
     94      * Clobbers $t0 and $sp
     95      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     96      * Reserves FRAME_SIZE_SAVE_REFS_ONLY + ARG_SLOT_SIZE bytes on the stack
     97      */
.macro SETUP_SAVE_REFS_ONLY_FRAME
    addiu  $sp, $sp, -48
    .cfi_adjust_cfa_offset 48

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 48)
#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS) size not as expected."
#endif

    # Spill callee-saved GPRs ($s2-$s8, $gp, $ra), highest offset first, with
    # matching .cfi_rel_offset annotations for the unwinder.
    sw     $ra, 44($sp)
    .cfi_rel_offset 31, 44
    sw     $s8, 40($sp)
    .cfi_rel_offset 30, 40
    sw     $gp, 36($sp)
    .cfi_rel_offset 28, 36
    sw     $s7, 32($sp)
    .cfi_rel_offset 23, 32
    sw     $s6, 28($sp)
    .cfi_rel_offset 22, 28
    sw     $s5, 24($sp)
    .cfi_rel_offset 21, 24
    sw     $s4, 20($sp)
    .cfi_rel_offset 20, 20
    sw     $s3, 16($sp)
    .cfi_rel_offset 19, 16
    sw     $s2, 12($sp)
    .cfi_rel_offset 18, 12
    # 2 words for alignment and bottom word will hold Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)  # $t0 = &Runtime::instance_ (via GOT)
    lw $t0, 0($t0)                                 # $t0 = Runtime::instance_
    lw $t0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t0)  # $t0 = SaveRefsOnly runtime method
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    135 
.macro RESTORE_SAVE_REFS_ONLY_FRAME
    # Tear down the frame built by SETUP_SAVE_REFS_ONLY_FRAME: drop the arg
    # slots, reload the callee-saved GPRs (offsets mirror the setup), then pop
    # the 48-byte frame. Does not return; caller handles control transfer.
    addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
    lw     $ra, 44($sp)
    .cfi_restore 31
    lw     $s8, 40($sp)
    .cfi_restore 30
    lw     $gp, 36($sp)
    .cfi_restore 28
    lw     $s7, 32($sp)
    .cfi_restore 23
    lw     $s6, 28($sp)
    .cfi_restore 22
    lw     $s5, 24($sp)
    .cfi_restore 21
    lw     $s4, 20($sp)
    .cfi_restore 20
    lw     $s3, 16($sp)
    .cfi_restore 19
    lw     $s2, 12($sp)
    .cfi_restore 18
    addiu  $sp, $sp, 48
    .cfi_adjust_cfa_offset -48
.endm
    160 
.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
    # Restore the SaveRefsOnly frame, then return to the caller via $ra.
    RESTORE_SAVE_REFS_ONLY_FRAME
    jalr   $zero, $ra
    nop                       # branch delay slot
.endm
    166 
    167     /*
    168      * Macro that sets up the callee save frame to conform with
    169      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
    170      * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
    171      *              (26 total + 1 word padding + method*)
    172      */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    addiu  $sp, $sp, -112
    .cfi_adjust_cfa_offset 112

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 112)
#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS) size not as expected."
#endif

    # Spill callee saves plus the managed-ABI argument registers
    # ($a1-$a3, $t0-$t1) so the runtime can inspect/relocate arguments.
    # $a0 is NOT saved here: it holds the Method*, stored separately by callers.
    sw     $ra, 108($sp)
    .cfi_rel_offset 31, 108
    sw     $s8, 104($sp)
    .cfi_rel_offset 30, 104
    sw     $gp, 100($sp)
    .cfi_rel_offset 28, 100
    sw     $s7, 96($sp)
    .cfi_rel_offset 23, 96
    sw     $s6, 92($sp)
    .cfi_rel_offset 22, 92
    sw     $s5, 88($sp)
    .cfi_rel_offset 21, 88
    sw     $s4, 84($sp)
    .cfi_rel_offset 20, 84
    sw     $s3, 80($sp)
    .cfi_rel_offset 19, 80
    sw     $s2, 76($sp)
    .cfi_rel_offset 18, 76
    sw     $t1, 72($sp)
    .cfi_rel_offset 9, 72
    sw     $t0, 68($sp)
    .cfi_rel_offset 8, 68
    sw     $a3, 64($sp)
    .cfi_rel_offset 7, 64
    sw     $a2, 60($sp)
    .cfi_rel_offset 6, 60
    sw     $a1, 56($sp)
    .cfi_rel_offset 5, 56
    # FP argument registers; $t8 is the scratch register for SDu here
    # ($t0/$t1 were just saved and may still carry live argument values).
    SDu $f18, $f19, 48, $sp, $t8
    SDu $f16, $f17, 40, $sp, $t8
    SDu $f14, $f15, 32, $sp, $t8
    SDu $f12, $f13, 24, $sp, $t8
    SDu $f10, $f11, 16, $sp, $t8
    SDu $f8, $f9, 8, $sp, $t8
    # bottom will hold Method*
.endm
    218 
    219     /*
    220      * Macro that sets up the callee save frame to conform with
    221      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
    222      * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
    223      *              (26 total + 1 word padding + method*)
    224      * Clobbers $t0 and $sp
    225      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
    226      * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
    227      */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    # Fetch the SaveRefsAndArgs runtime method from Runtime::instance_ (via GOT)
    # and install it as the frame's Method*.
    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
    lw $t0, 0($t0)
    lw $t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t0)
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    238 
    239     /*
    240      * Macro that sets up the callee save frame to conform with
    241      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
    242      * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
    243      *              (26 total + 1 word padding + method*)
    244      * Clobbers $sp
    245      * Use $a0 as the Method* and loads it into bottom of stack.
    246      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
    247      * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
    248      */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    # Same as SETUP_SAVE_REFS_AND_ARGS_FRAME, but the Method* is already in $a0
    # (managed calling convention) instead of being fetched from the Runtime.
    sw $a0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    256 
.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
    # Tear down the frame built by SETUP_SAVE_REFS_AND_ARGS_FRAME*: drop the
    # arg slots, reload GPRs and FP argument registers at the setup offsets,
    # then pop the 112-byte frame. $a0 (the Method*) is not restored here.
    addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
    lw     $ra, 108($sp)
    .cfi_restore 31
    lw     $s8, 104($sp)
    .cfi_restore 30
    lw     $gp, 100($sp)
    .cfi_restore 28
    lw     $s7, 96($sp)
    .cfi_restore 23
    lw     $s6, 92($sp)
    .cfi_restore 22
    lw     $s5, 88($sp)
    .cfi_restore 21
    lw     $s4, 84($sp)
    .cfi_restore 20
    lw     $s3, 80($sp)
    .cfi_restore 19
    lw     $s2, 76($sp)
    .cfi_restore 18
    lw     $t1, 72($sp)
    .cfi_restore 9
    lw     $t0, 68($sp)
    .cfi_restore 8
    lw     $a3, 64($sp)
    .cfi_restore 7
    lw     $a2, 60($sp)
    .cfi_restore 6
    lw     $a1, 56($sp)
    .cfi_restore 5
    # FP argument registers; $t8 scratch mirrors the setup macro.
    LDu $f18, $f19, 48, $sp, $t8
    LDu $f16, $f17, 40, $sp, $t8
    LDu $f14, $f15, 32, $sp, $t8
    LDu $f12, $f13, 24, $sp, $t8
    LDu $f10, $f11, 16, $sp, $t8
    LDu $f8, $f9, 8, $sp, $t8
    addiu  $sp, $sp, 112          # pop frame
    .cfi_adjust_cfa_offset -112
.endm
    297 
    298     /*
    299      * Macro that sets up the callee save frame to conform with
    300      * Runtime::CreateCalleeSaveMethod(kSaveEverything).
    301      * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
    302      * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
    303      *              28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
    304      * Clobbers $t0 and $t1.
    305      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
    306      * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
    307      * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
    308      */
.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 256)
#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected."
#endif

    # Spill every GPR except $zero/$k0/$k1/$sp, highest offset first, with
    # .cfi_rel_offset annotations for each so a full register context can be
    # reconstructed by the unwinder.
    sw     $ra, 252($sp)
    .cfi_rel_offset 31, 252
    sw     $fp, 248($sp)
    .cfi_rel_offset 30, 248
    sw     $gp, 244($sp)
    .cfi_rel_offset 28, 244
    sw     $t9, 240($sp)
    .cfi_rel_offset 25, 240
    sw     $t8, 236($sp)
    .cfi_rel_offset 24, 236
    sw     $s7, 232($sp)
    .cfi_rel_offset 23, 232
    sw     $s6, 228($sp)
    .cfi_rel_offset 22, 228
    sw     $s5, 224($sp)
    .cfi_rel_offset 21, 224
    sw     $s4, 220($sp)
    .cfi_rel_offset 20, 220
    sw     $s3, 216($sp)
    .cfi_rel_offset 19, 216
    sw     $s2, 212($sp)
    .cfi_rel_offset 18, 212
    sw     $s1, 208($sp)
    .cfi_rel_offset 17, 208
    sw     $s0, 204($sp)
    .cfi_rel_offset 16, 204
    sw     $t7, 200($sp)
    .cfi_rel_offset 15, 200
    sw     $t6, 196($sp)
    .cfi_rel_offset 14, 196
    sw     $t5, 192($sp)
    .cfi_rel_offset 13, 192
    sw     $t4, 188($sp)
    .cfi_rel_offset 12, 188
    sw     $t3, 184($sp)
    .cfi_rel_offset 11, 184
    sw     $t2, 180($sp)
    .cfi_rel_offset 10, 180
    sw     $t1, 176($sp)
    .cfi_rel_offset 9, 176
    sw     $t0, 172($sp)
    .cfi_rel_offset 8, 172
    sw     $a3, 168($sp)
    .cfi_rel_offset 7, 168
    sw     $a2, 164($sp)
    .cfi_rel_offset 6, 164
    sw     $a1, 160($sp)
    .cfi_rel_offset 5, 160
    sw     $a0, 156($sp)
    .cfi_rel_offset 4, 156
    sw     $v1, 152($sp)
    .cfi_rel_offset 3, 152
    sw     $v0, 148($sp)
    .cfi_rel_offset 2, 148

    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
    bal 1f
    .set push
    .set noat
    sw     $at, 144($sp)           # executed in the delay slot of bal
    .cfi_rel_offset 1, 144
    .set pop
1:
    .cpload $ra                    # derive $gp from $ra (= address of label 1)

    # Spill the full FP register file as 64-bit pairs; $t1 is SDu's scratch.
    SDu $f30, $f31, 136, $sp, $t1
    SDu $f28, $f29, 128, $sp, $t1
    SDu $f26, $f27, 120, $sp, $t1
    SDu $f24, $f25, 112, $sp, $t1
    SDu $f22, $f23, 104, $sp, $t1
    SDu $f20, $f21, 96,  $sp, $t1
    SDu $f18, $f19, 88,  $sp, $t1
    SDu $f16, $f17, 80,  $sp, $t1
    SDu $f14, $f15, 72,  $sp, $t1
    SDu $f12, $f13, 64,  $sp, $t1
    SDu $f10, $f11, 56,  $sp, $t1
    SDu $f8, $f9, 48,  $sp, $t1
    SDu $f6, $f7, 40,  $sp, $t1
    SDu $f4, $f5, 32,  $sp, $t1
    SDu $f2, $f3, 24,  $sp, $t1
    SDu $f0, $f1, 16,  $sp, $t1

    # 3 words padding and 1 word for holding Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)  # $t0 = &Runtime::instance_ (via GOT)
    lw $t0, 0($t0)                                 # $t0 = Runtime::instance_
    lw $t0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET($t0)  # $t0 = SaveEverything runtime method
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    407 
    408     /*
    409      * Macro that sets up the callee save frame to conform with
    410      * Runtime::CreateCalleeSaveMethod(kSaveEverything).
    411      * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
    412      *              28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
    413      * Clobbers $t0 and $t1.
    414      * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
    415      * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
    416      * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
    417      */
.macro SETUP_SAVE_EVERYTHING_FRAME
    # Carve out the full SaveEverything frame, then delegate the register
    # spilling to the DECREMENTED_SP variant (shared with the signal-handler
    # entrypoint, where the handler has already adjusted $sp).
    addiu  $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
    .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
.endm
    423 
.macro RESTORE_SAVE_EVERYTHING_FRAME restore_a0=1
    # Tear down the SaveEverything frame, restoring the full register context.
    # \restore_a0: when 0, leave $a0 untouched so a caller-computed result in
    # $a0 survives the restore (saved slot at 156($sp) is skipped).
    addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE

    # FP registers first: LDu uses $t1 as scratch, and $t1's own GPR slot is
    # reloaded only afterwards (at 176($sp)) below.
    LDu $f30, $f31, 136, $sp, $t1
    LDu $f28, $f29, 128, $sp, $t1
    LDu $f26, $f27, 120, $sp, $t1
    LDu $f24, $f25, 112, $sp, $t1
    LDu $f22, $f23, 104, $sp, $t1
    LDu $f20, $f21, 96,  $sp, $t1
    LDu $f18, $f19, 88,  $sp, $t1
    LDu $f16, $f17, 80,  $sp, $t1
    LDu $f14, $f15, 72,  $sp, $t1
    LDu $f12, $f13, 64,  $sp, $t1
    LDu $f10, $f11, 56,  $sp, $t1
    LDu $f8, $f9, 48,  $sp, $t1
    LDu $f6, $f7, 40,  $sp, $t1
    LDu $f4, $f5, 32,  $sp, $t1
    LDu $f2, $f3, 24,  $sp, $t1
    LDu $f0, $f1, 16,  $sp, $t1

    lw     $ra, 252($sp)
    .cfi_restore 31
    lw     $fp, 248($sp)
    .cfi_restore 30
    lw     $gp, 244($sp)
    .cfi_restore 28
    lw     $t9, 240($sp)
    .cfi_restore 25
    lw     $t8, 236($sp)
    .cfi_restore 24
    lw     $s7, 232($sp)
    .cfi_restore 23
    lw     $s6, 228($sp)
    .cfi_restore 22
    lw     $s5, 224($sp)
    .cfi_restore 21
    lw     $s4, 220($sp)
    .cfi_restore 20
    lw     $s3, 216($sp)
    .cfi_restore 19
    lw     $s2, 212($sp)
    .cfi_restore 18
    lw     $s1, 208($sp)
    .cfi_restore 17
    lw     $s0, 204($sp)
    .cfi_restore 16
    lw     $t7, 200($sp)
    .cfi_restore 15
    lw     $t6, 196($sp)
    .cfi_restore 14
    lw     $t5, 192($sp)
    .cfi_restore 13
    lw     $t4, 188($sp)
    .cfi_restore 12
    lw     $t3, 184($sp)
    .cfi_restore 11
    lw     $t2, 180($sp)
    .cfi_restore 10
    lw     $t1, 176($sp)
    .cfi_restore 9
    lw     $t0, 172($sp)
    .cfi_restore 8
    lw     $a3, 168($sp)
    .cfi_restore 7
    lw     $a2, 164($sp)
    .cfi_restore 6
    lw     $a1, 160($sp)
    .cfi_restore 5
    .if \restore_a0
    lw     $a0, 156($sp)
    .cfi_restore 4
    .endif
    lw     $v1, 152($sp)
    .cfi_restore 3
    lw     $v0, 148($sp)
    .cfi_restore 2
    .set push
    .set noat
    lw     $at, 144($sp)
    .cfi_restore 1
    .set pop

    addiu  $sp, $sp, 256            # pop frame
    .cfi_adjust_cfa_offset -256
.endm
    510 
    511     /*
    512      * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
    513      * exception is Thread::Current()->exception_ when the runtime method frame is ready.
    514      * Requires $gp properly set up.
    515      */
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
    # Tail-call into the runtime; does not return (the runtime long-jumps to
    # the catch handler). Callee address goes in $t9 per the MIPS PIC ABI.
    la      $t9, artDeliverPendingExceptionFromCode
    jalr    $zero, $t9                   # artDeliverPendingExceptionFromCode(Thread*)
    move    $a0, rSELF                   # pass Thread::Current (branch delay slot)
.endm
    521 
    522     /*
    523      * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
    524      * exception is Thread::Current()->exception_.
    525      * Requires $gp properly set up.
    526      */
.macro DELIVER_PENDING_EXCEPTION
    # Build the SaveAllCalleeSaves frame, then hand off to the runtime
    # exception-delivery path; does not return.
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    # save callee saves for throw
    DELIVER_PENDING_EXCEPTION_FRAME_READY
.endm
    531 
.macro RETURN_IF_NO_EXCEPTION
    # Return to the caller if Thread::Current()->exception_ is null; otherwise
    # deliver the pending exception. Always tears down the SaveRefsOnly frame.
    lw     $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_ONLY_FRAME
    bnez   $t0, 1f                       # branch to delivery if an exception is pending
    nop
    jalr   $zero, $ra                    # no exception: return to caller
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
    542 
.macro RETURN_IF_ZERO
    # Return if the call result $v0 is zero (success convention); otherwise
    # deliver the pending exception. Tears down the SaveRefsOnly frame first.
    RESTORE_SAVE_REFS_ONLY_FRAME
    bnez   $v0, 1f                       # non-zero result => failure, deliver exception
    nop
    jalr   $zero, $ra                    # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
    552 
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    # Return if the call result $v0 is non-zero (e.g. a valid object pointer);
    # zero means failure and a pending exception to deliver.
    RESTORE_SAVE_REFS_ONLY_FRAME
    beqz   $v0, 1f                       # zero result => failure, deliver exception
    nop
    jalr   $zero, $ra                    # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
    562 
    563     /*
    564      * On stack replacement stub.
    565      * On entry:
    566      *   a0 = stack to copy
    567      *   a1 = size of stack
    568      *   a2 = pc to call
    569      *   a3 = JValue* result
    570      *   [sp + 16] = shorty
    571      *   [sp + 20] = thread
    572      */
ENTRY art_quick_osr_stub
    // Save callee general purpose registers, RA and GP.
    addiu  $sp, $sp, -48
    .cfi_adjust_cfa_offset 48
    sw     $ra, 44($sp)
    .cfi_rel_offset 31, 44
    sw     $s8, 40($sp)
    .cfi_rel_offset 30, 40
    sw     $gp, 36($sp)
    .cfi_rel_offset 28, 36
    sw     $s7, 32($sp)
    .cfi_rel_offset 23, 32
    sw     $s6, 28($sp)
    .cfi_rel_offset 22, 28
    sw     $s5, 24($sp)
    .cfi_rel_offset 21, 24
    sw     $s4, 20($sp)
    .cfi_rel_offset 20, 20
    sw     $s3, 16($sp)
    .cfi_rel_offset 19, 16
    sw     $s2, 12($sp)
    .cfi_rel_offset 18, 12
    sw     $s1, 8($sp)
    .cfi_rel_offset 17, 8
    sw     $s0, 4($sp)
    .cfi_rel_offset 16, 4

    move   $s8, $sp                        # Save the stack pointer
    move   $s7, $a1                        # Save size of stack
    move   $s6, $a2                        # Save the pc to call
    lw     rSELF, 48+20($sp)               # Save managed thread pointer into rSELF
    addiu  $t0, $sp, -12                   # Reserve space for stack pointer,
                                           #    JValue* result, and ArtMethod* slot.
    srl    $t0, $t0, 4                     # Align stack pointer to 16 bytes
    sll    $sp, $t0, 4                     # Update stack pointer (rounded down to 16)
    sw     $s8, 4($sp)                     # Save old stack pointer
    sw     $a3, 8($sp)                     # Save JValue* result
    sw     $zero, 0($sp)                   # Store null for ArtMethod* at bottom of frame
    subu   $sp, $a1                        # Reserve space for callee stack
    # Shuffle args into place for memcpy(dest=$a0, src=$a1, n=$a2): copy the
    # interpreter's stack fragment onto the newly reserved callee stack.
    move   $a2, $a1
    move   $a1, $a0
    move   $a0, $sp
    la     $t9, memcpy
    jalr   $t9                             # memcpy (dest a0, src a1, bytes a2)
    addiu  $sp, $sp, -16                   # make space for argument slots for memcpy (delay slot)
    bal    .Losr_entry                     # Call the method
    addiu  $sp, $sp, 16                    # restore stack after memcpy (delay slot)
    lw     $a2, 8($sp)                     # Restore JValue* result
    lw     $sp, 4($sp)                     # Restore saved stack pointer
    lw     $a0, 48+16($sp)                 # load shorty
    lbu    $a0, 0($a0)                     # load return type (first shorty char)
    li     $a1, 'D'                        # put char 'D' into a1
    beq    $a0, $a1, .Losr_fp_result       # Test if result type char == 'D'
    li     $a1, 'F'                        # put char 'F' into a1 (delay slot; harmless if taken)
    beq    $a0, $a1, .Losr_fp_result       # Test if result type char == 'F'
    nop
    # Integer/reference/long result: store $v0/$v1 into *result.
    sw     $v0, 0($a2)
    b      .Losr_exit
    sw     $v1, 4($a2)                     # store v0/v1 into result (delay slot)
.Losr_fp_result:
    SDu    $f0, $f1, 0, $a2, $t0           # store f0/f1 into result
.Losr_exit:
    # Restore callee saves and return; $sp here is the original frame pointer.
    lw     $ra, 44($sp)
    .cfi_restore 31
    lw     $s8, 40($sp)
    .cfi_restore 30
    lw     $gp, 36($sp)
    .cfi_restore 28
    lw     $s7, 32($sp)
    .cfi_restore 23
    lw     $s6, 28($sp)
    .cfi_restore 22
    lw     $s5, 24($sp)
    .cfi_restore 21
    lw     $s4, 20($sp)
    .cfi_restore 20
    lw     $s3, 16($sp)
    .cfi_restore 19
    lw     $s2, 12($sp)
    .cfi_restore 18
    lw     $s1, 8($sp)
    .cfi_restore 17
    lw     $s0, 4($sp)
    .cfi_restore 16
    jalr   $zero, $ra
    addiu  $sp, $sp, 48                    # pop frame (delay slot)
    .cfi_adjust_cfa_offset -48
.Losr_entry:
    # Jump into the compiled OSR code ($s6 = pc), storing this stub's $ra into
    # the top slot of the copied stack ($s7 = stack size) per the compiler ABI.
    addiu  $s7, $s7, -4
    addu   $t0, $s7, $sp
    move   $t9, $s6
    jalr   $zero, $t9
    sw     $ra, 0($t0)                     # Store RA per the compiler ABI (delay slot)
END art_quick_osr_stub
    667 
    668     /*
    669      * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
    670      * FIXME: just guessing about the shape of the jmpbuf.  Where will pc be?
    671      */
ENTRY art_quick_do_long_jump
    # Restore the full FP register file from fprs_ (one 8-byte slot per pair).
    LDu  $f0,  $f1,   0*8, $a1, $t1
    LDu  $f2,  $f3,   1*8, $a1, $t1
    LDu  $f4,  $f5,   2*8, $a1, $t1
    LDu  $f6,  $f7,   3*8, $a1, $t1
    LDu  $f8,  $f9,   4*8, $a1, $t1
    LDu  $f10, $f11,  5*8, $a1, $t1
    LDu  $f12, $f13,  6*8, $a1, $t1
    LDu  $f14, $f15,  7*8, $a1, $t1
    LDu  $f16, $f17,  8*8, $a1, $t1
    LDu  $f18, $f19,  9*8, $a1, $t1
    LDu  $f20, $f21, 10*8, $a1, $t1
    LDu  $f22, $f23, 11*8, $a1, $t1
    LDu  $f24, $f25, 12*8, $a1, $t1
    LDu  $f26, $f27, 13*8, $a1, $t1
    LDu  $f28, $f29, 14*8, $a1, $t1
    LDu  $f30, $f31, 15*8, $a1, $t1

    # Restore GPRs from gprs_ (4 bytes per slot, indexed by register number).
    # $a0 (the gprs_ base) is reloaded last, after all other slots are read.
    .set push
    .set nomacro
    .set noat
    lw      $at, 4($a0)
    .set pop
    lw      $v0, 8($a0)
    lw      $v1, 12($a0)
    lw      $a1, 20($a0)
    lw      $a2, 24($a0)
    lw      $a3, 28($a0)
    lw      $t0, 32($a0)
    lw      $t1, 36($a0)
    lw      $t2, 40($a0)
    lw      $t3, 44($a0)
    lw      $t4, 48($a0)
    lw      $t5, 52($a0)
    lw      $t6, 56($a0)
    lw      $t7, 60($a0)
    lw      $s0, 64($a0)
    lw      $s1, 68($a0)
    lw      $s2, 72($a0)
    lw      $s3, 76($a0)
    lw      $s4, 80($a0)
    lw      $s5, 84($a0)
    lw      $s6, 88($a0)
    lw      $s7, 92($a0)
    lw      $t8, 96($a0)
    lw      $t9, 100($a0)       # $t9 = target pc for the jump below
    lw      $gp, 112($a0)
    lw      $sp, 116($a0)
    lw      $fp, 120($a0)
    lw      $ra, 124($a0)
    lw      $a0, 16($a0)        # finally overwrite $a0 itself
    move    $v0, $zero          # clear result registers v0 and v1
    jalr    $zero, $t9          # do long jump
    move    $v1, $zero          # (branch delay slot)
END art_quick_do_long_jump
    727 
    728     /*
    729      * Called by managed code, saves most registers (forms basis of long jump context) and passes
    730      * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
    731      * the bottom of the thread. On entry a0 holds Throwable*
    732      */
ENTRY art_quick_deliver_exception
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    # Tail-call; the runtime long-jumps to the catch handler, never returns.
    la   $t9, artDeliverExceptionFromCode
    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot; $a0 = Throwable*)
END art_quick_deliver_exception
    739 
    740     /*
    741      * Called by managed code to create and deliver a NullPointerException
    742      */
    743     .extern artThrowNullPointerExceptionFromCode
ENTRY_NO_GP art_quick_throw_null_pointer_exception
    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
    SETUP_SAVE_EVERYTHING_FRAME
    # Tail-call into the runtime; does not return.
    la   $t9, artThrowNullPointerExceptionFromCode
    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_null_pointer_exception
    752 
    753 
    754     /*
    755      * Call installed by a signal handler to create and deliver a NullPointerException.
    756      */
    757     .extern artThrowNullPointerExceptionFromSignal
ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
    # The signal handler has already decremented $sp by FRAME_SIZE_SAVE_EVERYTHING,
    # hence the DECREMENTED_SP setup variant and the custom CFA in the ENTRY macro.
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
    # Retrieve the fault address from the padding where the signal handler stores it.
    lw   $a0, (ARG_SLOT_SIZE + __SIZEOF_POINTER__)($sp)
    la   $t9, artThrowNullPointerExceptionFromSignal
    jalr $zero, $t9                 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_null_pointer_exception_from_signal
    766 
    767     /*
    768      * Called by managed code to create and deliver an ArithmeticException
    769      */
    770     .extern artThrowDivZeroFromCode
ENTRY_NO_GP art_quick_throw_div_zero
    SETUP_SAVE_EVERYTHING_FRAME
    # Tail-call into the runtime; does not return.
    la   $t9, artThrowDivZeroFromCode
    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_div_zero
    777 
    778     /*
    779      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
    780      */
    781     .extern artThrowArrayBoundsFromCode
ENTRY_NO_GP art_quick_throw_array_bounds
    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
    # On entry: $a0 = offending index, $a1 = array length (per the C helper's signature below).
    SETUP_SAVE_EVERYTHING_FRAME
    la   $t9, artThrowArrayBoundsFromCode
    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_array_bounds
    790 
    791     /*
    792      * Called by managed code to create and deliver a StringIndexOutOfBoundsException
    793      * as if thrown from a call to String.charAt().
    794      */
    795     .extern artThrowStringBoundsFromCode
ENTRY_NO_GP art_quick_throw_string_bounds
    # On entry: $a0 = offending index, $a1 = string length (per the C helper's signature below).
    SETUP_SAVE_EVERYTHING_FRAME
    la   $t9, artThrowStringBoundsFromCode
    jalr $zero, $t9                 # artThrowStringBoundsFromCode(index, limit, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_string_bounds
    802 
    803     /*
    804      * Called by managed code to create and deliver a StackOverflowError.
    805      */
    806     .extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
    # Uses the SAVE_ALL_CALLEE_SAVES frame (not SAVE_EVERYTHING like the other throws above).
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la   $t9, artThrowStackOverflowFromCode
    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*); jump, no link
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_stack_overflow
    813 
    814     /*
    815      * All generated callsites for interface invokes and invocation slow paths will load arguments
    816      * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
    817      * the method_idx.  This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/$a1.
    819      *
    820      * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
    821      * of the target Method* in $v0 and method->code_ in $v1.
    822      *
    823      * If unsuccessful, the helper will return null/null. There will be a pending exception in the
    824      * thread and we branch to another stub to deliver it.
    825      *
    826      * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
    827      * pointing back to the original caller.
    828      */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME         # save callee saves in case allocation triggers GC
    move  $a2, rSELF                       # pass Thread::Current
    la    $t9, \cxx_name
    jalr  $t9                              # (method_idx, this, Thread*, $sp)
    addiu $a3, $sp, ARG_SLOT_SIZE          # pass $sp (remove arg slots) (branch delay slot)
    move  $a0, $v0                         # save target Method*
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    beqz  $v0, 1f                          # null Method* -> exception pending, deliver it
    move  $t9, $v1                         # save $v0->code_ (delay slot, runs either way)
    jalr  $zero, $t9                       # tail-jump to the target's code; original $ra intact
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
# Defines an exported entry point \c_name whose body is INVOKE_TRAMPOLINE_BODY \cxx_name.
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm
    850 
    851 INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
    852 
    853 INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
    854 INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
    855 INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
    856 INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
    857 
    858 // Each of the following macros expands into four instructions or 16 bytes.
    859 // They are used to build indexable "tables" of code.
    860 
# Loads a 32-bit argument into GPR $\reg and jumps back to \label.
#   reg       - destination GPR name (without '$')
#   next_arg  - register pointing just past the current argument
#   index_reg - register holding the table index (scaled by 16, one entry per slot)
# Expands to exactly 16 bytes (padded by .balign) so it forms one table entry.
.macro LOAD_WORD_TO_REG reg, next_arg, index_reg, label
    lw    $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
    b     \label
    addiu $\index_reg, 16         # advance to next table entry (branch delay slot)
    .balign 16
.endm
    867 
# Loads a 64-bit argument into the GPR pair $\reg1 (low) / $\reg2 (high) and jumps back to \label.
# Unlike LOAD_WORD_TO_REG, the new index is an absolute value (\next_index) because a long may
# skip a register slot for pair alignment. One 16-byte table entry.
.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index_reg, next_index, label
    lw    $\reg1, -8($\next_arg)  # next_arg points to argument after the current one (offset is 8)
    lw    $\reg2, -4($\next_arg)
    b     \label
    li    $\index_reg, \next_index  # set new index (branch delay slot)
    .balign 16
.endm
    875 
# Loads a 32-bit float argument into FPR $\reg and jumps back to \label.
# Same shape as LOAD_WORD_TO_REG but uses lwc1. One 16-byte table entry.
.macro LOAD_FLOAT_TO_REG reg, next_arg, index_reg, label
    lwc1  $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
    b     \label
    addiu $\index_reg, 16         # advance to next table entry (branch delay slot)
    .balign 16
.endm
    882 
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
// LDu expands into 3 instructions for 64-bit FPU, so index_reg cannot be updated here.
// (3 insns + branch already fill the 16-byte entry; the caller bumps the index instead.)
.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
    .set reorder                                # force use of the branch delay slot
    LDu  $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
                                                # (offset is 8)
    b     \label
    .set noreorder
    .balign 16
.endm
#else
// LDu expands into 2 instructions for 32-bit FPU, so index_reg is updated here.
.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
    LDu  $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
                                                # (offset is 8)
    b     \label
    addiu $\index_reg, 16                       # advance to next table entry (branch delay slot)
    .balign 16
.endm
#endif
    903 
# Terminal table entry: loads nothing, pins the index at \next_index (the table's end value)
# and jumps back to \label. Keeps out-of-register arguments from advancing the index further.
.macro LOAD_END index_reg, next_index, label
    b     \label
    li    $\index_reg, \next_index  # clamp index (branch delay slot)
    .balign 16
.endm
    909 
    910 #define SPILL_SIZE    32
    911 
    912     /*
    913      * Invocation stub for quick code.
    914      * On entry:
    915      *   a0 = method pointer
    916      *   a1 = argument array or null for no argument methods
    917      *   a2 = size of argument array in bytes
    918      *   a3 = (managed) thread pointer
    919      *   [sp + 16] = JValue* result
    920      *   [sp + 20] = shorty
    921      */
ENTRY art_quick_invoke_stub
    # Register roles below: $fp = saved entry $sp, $s1 = managed thread pointer,
    # $t8 = current-argument cursor, $t6 = gpr_index*16, $t7 = fp_index*16,
    # $t9 = shorty cursor, $t2-$t5 = dispatch-table base addresses.
    sw    $a0, 0($sp)           # save out a0
    addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
    .cfi_adjust_cfa_offset SPILL_SIZE
    sw    $gp, 16($sp)
    sw    $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw    $fp, 8($sp)
    .cfi_rel_offset 30, 8
    sw    $s1, 4($sp)
    .cfi_rel_offset 17, 4
    sw    $s0, 0($sp)
    .cfi_rel_offset 16, 0
    move  $fp, $sp              # save sp in fp
    .cfi_def_cfa_register 30
    move  $s1, $a3              # move managed thread pointer into s1
    addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
    subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
    la    $t9, memcpy
    jalr  $t9                   # (dest, src, bytes); src=$a1, bytes=$a2 still live from entry
    addiu $sp, $sp, -16         # make space for argument slots for memcpy (delay slot)
    addiu $sp, $sp, 16          # restore stack after memcpy
    lw    $gp, 16($fp)          # restore $gp
    lw    $a0, SPILL_SIZE($fp)  # restore ArtMethod*
    lw    $a1, 4($sp)           # a1 = this*
    addiu $t8, $sp, 8           # t8 = pointer to the current argument (skip ArtMethod* and this*)
    li    $t6, 0                # t6 = gpr_index = 0 (corresponds to A2; A0 and A1 are skipped)
    li    $t7, 0                # t7 = fp_index = 0
    lw    $t9, 20 + SPILL_SIZE($fp)  # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
                                # as the $fp is SPILL_SIZE bytes below the $sp on entry)
    addiu $t9, 1                # t9 = shorty + 1 (skip 1 for return type)

    // Load the base addresses of tabInt ... tabDouble.
    // We will use the register indices (gpr_index, fp_index) to branch.
    // Note that the indices are scaled by 16, so they can be added to the bases directly.
#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
    lapc  $t2, tabInt
    lapc  $t3, tabLong
    lapc  $t4, tabSingle
    lapc  $t5, tabDouble
#else
    bltzal $zero, tabBase       # nal: branch never taken, but $ra <- PC + 8 (= tabBase)
    addiu $t2, $ra, %lo(tabInt - tabBase)
tabBase:
    addiu $t3, $ra, %lo(tabLong - tabBase)
    addiu $t4, $ra, %lo(tabSingle - tabBase)
    addiu $t5, $ra, %lo(tabDouble - tabBase)
#endif

loop:
    lbu   $ra, 0($t9)           # ra = shorty[i]
    beqz  $ra, loopEnd          # finish getting args when shorty[i] == '\0'
    addiu $t9, 1

    # Classify the current ARG type char by successive subtraction (return char was skipped).
    addiu $ra, -'J'
    beqz  $ra, isLong           # branch if arg type char == 'J'
    addiu $ra, 'J' - 'D'
    beqz  $ra, isDouble         # branch if arg type char == 'D'
    addiu $ra, 'D' - 'F'
    beqz  $ra, isSingle         # branch if arg type char == 'F'

    # Default: 32-bit int/reference. Jump into tabInt at the current gpr_index.
    addu  $ra, $t2, $t6
    jalr  $zero, $ra
    addiu $t8, 4                # next_arg = curr_arg + 4 (delay slot)

isLong:
    addu  $ra, $t3, $t6         # jump into tabLong at the current gpr_index
    jalr  $zero, $ra
    addiu $t8, 8                # next_arg = curr_arg + 8 (delay slot)

isSingle:
    addu  $ra, $t4, $t7         # jump into tabSingle at the current fp_index
    jalr  $zero, $ra
    addiu $t8, 4                # next_arg = curr_arg + 4 (delay slot)

isDouble:
    addu  $ra, $t5, $t7         # jump into tabDouble at the current fp_index
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
    addiu $t7, 16               # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
#endif
    jalr  $zero, $ra
    addiu $t8, 8                # next_arg = curr_arg + 8 (delay slot)

loopEnd:
    lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
    jalr  $t9                   # call the method
    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame (delay slot)
    move  $sp, $fp              # restore the stack
    lw    $s0, 0($sp)
    .cfi_restore 16
    lw    $s1, 4($sp)
    .cfi_restore 17
    lw    $fp, 8($sp)
    .cfi_restore 30
    lw    $ra, 12($sp)
    .cfi_restore 31
    addiu $sp, $sp, SPILL_SIZE
    .cfi_adjust_cfa_offset -SPILL_SIZE
    # Store the return value into *result according to the shorty's return type char.
    lw    $t0, 16($sp)          # get result pointer
    lw    $t1, 20($sp)          # get shorty
    lb    $t1, 0($t1)           # get result type char
    li    $t2, 'D'              # put char 'D' into t2
    beq   $t1, $t2, 5f          # branch if result type char == 'D'
    li    $t3, 'F'              # put char 'F' into t3
    beq   $t1, $t3, 5f          # branch if result type char == 'F'
    sw    $v0, 0($t0)           # store the result
    jalr  $zero, $ra
    sw    $v1, 4($t0)           # store the other half of the result (delay slot)
5:
    SDu   $f0, $f1, 0, $t0, $t1 # store floating point result
    jalr  $zero, $ra
    nop

    // Note that gpr_index is kept within the range of tabInt and tabLong
    // and fp_index is kept within the range of tabSingle and tabDouble.
    .balign 16
tabInt:
    LOAD_WORD_TO_REG a2, t8, t6, loop             # a2 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a3, t8, t6, loop             # a3 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t0, t8, t6, loop             # t0 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t1, t8, t6, loop             # t1 = current argument, gpr_index += 16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
tabLong:
    LOAD_LONG_TO_REG a2, a3, t8, t6, 2*16, loop   # a2_a3 = curr_arg, gpr_index = 2*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
tabSingle:
    LOAD_FLOAT_TO_REG f8, t8, t7, loop            # f8 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f10, t8, t7, loop           # f10 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f12, t8, t7, loop           # f12 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f14, t8, t7, loop           # f14 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f16, t8, t7, loop           # f16 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f18, t8, t7, loop           # f18 = curr_arg, fp_index += 16
    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
tabDouble:
    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loop   # f8_f9 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loop # f10_f11 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loop # f12_f13 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loop # f14_f15 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loop # f16_f17 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loop # f18_f19 = curr_arg; if FPU32, fp_index += 16
    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
END art_quick_invoke_stub
   1071 
   1072     /*
   1073      * Invocation static stub for quick code.
   1074      * On entry:
   1075      *   a0 = method pointer
   1076      *   a1 = argument array or null for no argument methods
   1077      *   a2 = size of argument array in bytes
   1078      *   a3 = (managed) thread pointer
   1079      *   [sp + 16] = JValue* result
   1080      *   [sp + 20] = shorty
   1081      */
ENTRY art_quick_invoke_static_stub
    # Static variant of art_quick_invoke_stub: no implicit this*, so arguments start in $a1
    # and the cursor skips only the ArtMethod* slot. Register roles as in the non-static stub.
    sw    $a0, 0($sp)           # save out a0
    addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
    .cfi_adjust_cfa_offset SPILL_SIZE
    sw    $gp, 16($sp)
    sw    $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw    $fp, 8($sp)
    .cfi_rel_offset 30, 8
    sw    $s1, 4($sp)
    .cfi_rel_offset 17, 4
    sw    $s0, 0($sp)
    .cfi_rel_offset 16, 0
    move  $fp, $sp              # save sp in fp
    .cfi_def_cfa_register 30
    move  $s1, $a3              # move managed thread pointer into s1
    addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
    subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
    la    $t9, memcpy
    jalr  $t9                   # (dest, src, bytes); src=$a1, bytes=$a2 still live from entry
    addiu $sp, $sp, -16         # make space for argument slots for memcpy (delay slot)
    addiu $sp, $sp, 16          # restore stack after memcpy
    lw    $gp, 16($fp)          # restore $gp
    lw    $a0, SPILL_SIZE($fp)  # restore ArtMethod*
    addiu $t8, $sp, 4           # t8 = pointer to the current argument (skip ArtMethod*)
    li    $t6, 0                # t6 = gpr_index = 0 (corresponds to A1; A0 is skipped)
    li    $t7, 0                # t7 = fp_index = 0
    lw    $t9, 20 + SPILL_SIZE($fp)  # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
                                # as the $fp is SPILL_SIZE bytes below the $sp on entry)
    addiu $t9, 1                # t9 = shorty + 1 (skip 1 for return type)

    // Load the base addresses of tabIntS ... tabDoubleS.
    // We will use the register indices (gpr_index, fp_index) to branch.
    // Note that the indices are scaled by 16, so they can be added to the bases directly.
#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
    lapc  $t2, tabIntS
    lapc  $t3, tabLongS
    lapc  $t4, tabSingleS
    lapc  $t5, tabDoubleS
#else
    bltzal $zero, tabBaseS      # nal: branch never taken, but $ra <- PC + 8 (= tabBaseS)
    addiu $t2, $ra, %lo(tabIntS - tabBaseS)
tabBaseS:
    addiu $t3, $ra, %lo(tabLongS - tabBaseS)
    addiu $t4, $ra, %lo(tabSingleS - tabBaseS)
    addiu $t5, $ra, %lo(tabDoubleS - tabBaseS)
#endif

loopS:
    lbu   $ra, 0($t9)           # ra = shorty[i]
    beqz  $ra, loopEndS         # finish getting args when shorty[i] == '\0'
    addiu $t9, 1

    # Classify the current ARG type char by successive subtraction (return char was skipped).
    addiu $ra, -'J'
    beqz  $ra, isLongS          # branch if arg type char == 'J'
    addiu $ra, 'J' - 'D'
    beqz  $ra, isDoubleS        # branch if arg type char == 'D'
    addiu $ra, 'D' - 'F'
    beqz  $ra, isSingleS        # branch if arg type char == 'F'

    # Default: 32-bit int/reference. Jump into tabIntS at the current gpr_index.
    addu  $ra, $t2, $t6
    jalr  $zero, $ra
    addiu $t8, 4                # next_arg = curr_arg + 4 (delay slot)

isLongS:
    addu  $ra, $t3, $t6         # jump into tabLongS at the current gpr_index
    jalr  $zero, $ra
    addiu $t8, 8                # next_arg = curr_arg + 8 (delay slot)

isSingleS:
    addu  $ra, $t4, $t7         # jump into tabSingleS at the current fp_index
    jalr  $zero, $ra
    addiu $t8, 4                # next_arg = curr_arg + 4 (delay slot)

isDoubleS:
    addu  $ra, $t5, $t7         # jump into tabDoubleS at the current fp_index
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
    addiu $t7, 16               # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
#endif
    jalr  $zero, $ra
    addiu $t8, 8                # next_arg = curr_arg + 8 (delay slot)

loopEndS:
    lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
    jalr  $t9                   # call the method
    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame (delay slot)
    move  $sp, $fp              # restore the stack
    lw    $s0, 0($sp)
    .cfi_restore 16
    lw    $s1, 4($sp)
    .cfi_restore 17
    lw    $fp, 8($sp)
    .cfi_restore 30
    lw    $ra, 12($sp)
    .cfi_restore 31
    addiu $sp, $sp, SPILL_SIZE
    .cfi_adjust_cfa_offset -SPILL_SIZE
    # Store the return value into *result according to the shorty's return type char.
    lw    $t0, 16($sp)          # get result pointer
    lw    $t1, 20($sp)          # get shorty
    lb    $t1, 0($t1)           # get result type char
    li    $t2, 'D'              # put char 'D' into t2
    beq   $t1, $t2, 6f          # branch if result type char == 'D'
    li    $t3, 'F'              # put char 'F' into t3
    beq   $t1, $t3, 6f          # branch if result type char == 'F'
    sw    $v0, 0($t0)           # store the result
    jalr  $zero, $ra
    sw    $v1, 4($t0)           # store the other half of the result (delay slot)
6:
    SDu   $f0, $f1, 0, $t0, $t1 # store floating point result
    jalr  $zero, $ra
    nop

    // Note that gpr_index is kept within the range of tabIntS and tabLongS
    // and fp_index is kept within the range of tabSingleS and tabDoubleS.
    .balign 16
tabIntS:
    LOAD_WORD_TO_REG a1, t8, t6, loopS             # a1 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a2, t8, t6, loopS             # a2 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a3, t8, t6, loopS             # a3 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t0, t8, t6, loopS             # t0 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t1, t8, t6, loopS             # t1 = current argument, gpr_index += 16
    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
tabLongS:
    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS   # a2_a3 = curr_arg, gpr_index = 3*16
    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS   # a2_a3 = curr_arg, gpr_index = 3*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS   # t0_t1 = curr_arg, gpr_index = 5*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS   # t0_t1 = curr_arg, gpr_index = 5*16
    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
tabSingleS:
    LOAD_FLOAT_TO_REG f8, t8, t7, loopS            # f8 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f10, t8, t7, loopS           # f10 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f12, t8, t7, loopS           # f12 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f14, t8, t7, loopS           # f14 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f16, t8, t7, loopS           # f16 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f18, t8, t7, loopS           # f18 = curr_arg, fp_index += 16
    LOAD_END t7, 6*16, loopS                       # no more FPR args, fp_index = 6*16
tabDoubleS:
    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loopS   # f8_f9 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loopS # f10_f11 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loopS # f12_f13 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loopS # f14_f15 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loopS # f16_f17 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loopS # f18_f19 = curr_arg; if FPU32, fp_index += 16
    LOAD_END t7, 6*16, loopS                       # no more FPR args, fp_index = 6*16
END art_quick_invoke_static_stub
   1232 
   1233 #undef SPILL_SIZE
   1234 
   1235     /*
   1236      * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
   1237      * failure.
   1238      */
   1239     .extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
    lw     $a2, 0($sp)                # pass referrer's Method* (at bottom of managed frame)
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case exception allocation triggers GC
    la     $t9, artHandleFillArrayDataFromCode
    jalr   $t9                        # (payload offset, Array*, method, Thread*)
    move   $a3, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO                    # return on success, deliver exception on nonzero result
END art_quick_handle_fill_data
   1248 
   1249     /*
   1250      * Entry from managed code that calls artLockObjectFromCode, may block for GC.
   1251      */
   1252     .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    # Fast-path thin lock via ll/sc; falls back to artLockObjectFromCode on contention,
    # inflated locks, or count overflow. $a0 = Object* (null -> NullPointerException).
    beqz    $a0, art_quick_throw_null_pointer_exception
    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE            # (delay slot, runs either way)
    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  # mask that clears the gc bits
.Lretry_lock:
    lw      $t0, THREAD_ID_OFFSET(rSELF)  # TODO: Can the thread ID really change during the loop?
    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)      # load-linked lock word
    and     $t2, $t1, $t3                 # zero the gc bits
    bnez    $t2, .Lnot_unlocked           # already thin locked
    # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
    or      $t2, $t1, $t0                 # $t2 holds thread id with count of 0 with preserved read barrier bits
    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)      # store-conditional (atomic CAS with ll)
    beqz    $t2, .Lretry_lock             # store failed, retry
    nop
    jalr    $zero, $ra
    sync                                  # full (LoadLoad|LoadStore) memory barrier (delay slot)
.Lnot_unlocked:
    # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
    bnez    $t2, .Lslow_lock              # if either of the top two bits are set, go slow path
    xor     $t2, $t1, $t0                 # lock_word.ThreadId() ^ self->ThreadId() (delay slot)
    andi    $t2, $t2, 0xFFFF              # zero top 16 bits
    bnez    $t2, .Lslow_lock              # lock word and self thread id's match -> recursive lock
                                          # otherwise contention, go to slow path
    and     $t2, $t1, $t3                 # zero the gc bits (delay slot)
    addu    $t2, $t2, $t8                 # increment count in lock word
    srl     $t2, $t2, LOCK_WORD_STATE_SHIFT  # if the first gc state bit is set, we overflowed.
    bnez    $t2, .Lslow_lock              # if we overflow the count go slow path
    addu    $t2, $t1, $t8                 # increment count for real (delay slot)
    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
    beqz    $t2, .Lretry_lock             # store failed, retry
    nop
    jalr    $zero, $ra
    nop
.Lslow_lock:
    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
    la      $t9, artLockObjectFromCode
    jalr    $t9                           # (Object* obj, Thread*)
    move    $a1, rSELF                    # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_lock_object
   1294 
ENTRY art_quick_lock_object_no_inline
    # Variant of art_quick_lock_object with no fast path: always calls the C helper.
    beqz    $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
    la      $t9, artLockObjectFromCode
    jalr    $t9                           # (Object* obj, Thread*)
    move    $a1, rSELF                    # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_lock_object_no_inline
   1304 
   1305     /*
   1306      * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
   1307      */
   1308     .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    # Fast-path thin unlock; falls back to artUnlockObjectFromCode for inflated locks
    # or thread-id mismatch. $a0 = Object* (null -> NullPointerException).
    beqz    $a0, art_quick_throw_null_pointer_exception
    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE            # (delay slot, runs either way)
    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  # mask that clears the gc bits
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    lw      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
#else
    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)  # Need to use atomic read-modify-write for read barrier
#endif
    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
    bnez    $t2, .Lslow_unlock         # if either of the top two bits are set, go slow path
    lw      $t0, THREAD_ID_OFFSET(rSELF)  # (delay slot, runs either way)
    and     $t2, $t1, $t3              # zero the gc bits
    xor     $t2, $t2, $t0              # lock_word.ThreadId() ^ self->ThreadId()
    andi    $t2, $t2, 0xFFFF           # zero top 16 bits
    bnez    $t2, .Lslow_unlock         # do lock word and self thread id's match?
    and     $t2, $t1, $t3              # zero the gc bits (delay slot)
    bgeu    $t2, $t8, .Lrecursive_thin_unlock  # count > 0 -> just decrement
    # transition to unlocked
    nor     $t2, $zero, $t3            # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED (delay slot)
    and     $t2, $t1, $t2              # $t2: zero except for the preserved gc bits
    sync                               # full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
    jalr    $zero, $ra
    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)   # plain store (delay slot)
#else
    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)   # store-conditional, pairs with ll above
    beqz    $t2, .Lretry_unlock        # store failed, retry
    nop
    jalr    $zero, $ra
    nop
#endif
.Lrecursive_thin_unlock:
    # t1: original lock word
    subu    $t2, $t1, $t8              # decrement count
#ifndef USE_READ_BARRIER
    jalr    $zero, $ra
    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)   # plain store (delay slot)
#else
    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)   # store-conditional, pairs with ll above
    beqz    $t2, .Lretry_unlock        # store failed, retry
    nop
    jalr    $zero, $ra
    nop
#endif
.Lslow_unlock:
    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case exception allocation triggers GC
    la      $t9, artUnlockObjectFromCode
    jalr    $t9                        # (Object* obj, Thread*)
    move    $a1, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_unlock_object
   1362 
ENTRY art_quick_unlock_object_no_inline
    # Variant of art_quick_unlock_object with no fast path: always calls the C helper.
    beqz    $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case exception allocation triggers GC
    la      $t9, artUnlockObjectFromCode
    jalr    $t9                       # (Object* obj, Thread*)
    move    $a1, rSELF                # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_unlock_object_no_inline
   1372 
   1373     /*
   1374      * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
   1375      */
   1376     .extern artInstanceOfFromCode
   1377     .extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
    # $a0 = Object*, $a1 = Class*. Calls artInstanceOfFromCode; on failure restores the
    # original args and throws ClassCastException. Builds a small 32-byte scratch frame
    # (not a managed callee-save frame) to keep $a0/$a1/$t9/$ra across the call.
    addiu  $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sw     $gp, 16($sp)
    sw     $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw     $t9, 8($sp)
    sw     $a1, 4($sp)
    sw     $a0, 0($sp)
    la     $t9, artInstanceOfFromCode
    jalr   $t9                       # (Object* obj, Class* ref_class)
    addiu  $sp, $sp, -16             # reserve argument slots on the stack (delay slot)
    addiu  $sp, $sp, 16
    lw     $gp, 16($sp)
    beqz   $v0, .Lthrow_class_cast_exception  # zero result -> not an instance
    lw     $ra, 12($sp)              # (delay slot, runs either way)
    jalr   $zero, $ra                # success: return to caller
    addiu  $sp, $sp, 32              # pop scratch frame (delay slot)
    .cfi_adjust_cfa_offset -32
.Lthrow_class_cast_exception:
    # Restore the original arguments before setting up the throw frame.
    lw     $t9, 8($sp)
    lw     $a1, 4($sp)
    lw     $a0, 0($sp)
    addiu  $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la   $t9, artThrowClassCastExceptionForObject
    jalr $zero, $t9                 # artThrowClassCastException (Object*, Class*, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_check_instance_of
   1408 
   1409     /*
   1410      * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
   1411      * nReg is the register number for rReg.
   1412      */
.macro POP_REG_NE rReg, nReg, offset, rExclude
    .ifnc \rReg, \rExclude          # .ifnc: assemble only if the two names differ
        lw \rReg, \offset($sp)      # restore rReg
        .cfi_restore \nReg
    .endif
.endm
   1419 
   1420     /*
   1421      * Macro to insert read barrier, only used in art_quick_aput_obj.
   1422      * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
   1423      * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
   1424      */
.macro READ_BARRIER rDest, rObj, offset
#ifdef USE_READ_BARRIER
    # Slow-path read barrier: spill caller-visible registers, call artReadBarrierSlow, and
    # place the (already unpoisoned) result in rDest. Without read barriers this is a plain load.
    # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
    addiu  $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sw     $ra, 28($sp)
    .cfi_rel_offset 31, 28
    sw     $t9, 24($sp)
    .cfi_rel_offset 25, 24
    sw     $t1, 20($sp)
    .cfi_rel_offset 9, 20
    sw     $t0, 16($sp)
    .cfi_rel_offset 8, 16
    sw     $a2, 8($sp)              # padding slot at offset 12 (padding can be any slot in the 32B)
    .cfi_rel_offset 6, 8
    sw     $a1, 4($sp)
    .cfi_rel_offset 5, 4
    sw     $a0, 0($sp)
    .cfi_rel_offset 4, 0

    # move $a0, \rRef               # pass ref in a0 (no-op for now since parameter ref is unused)
    .ifnc \rObj, $a1
        move $a1, \rObj             # pass rObj
    .endif
    addiu  $a2, $zero, \offset      # pass offset
    la     $t9, artReadBarrierSlow
    jalr   $t9                      # artReadBarrierSlow(ref, rObj, offset)
    addiu  $sp, $sp, -16            # Use branch delay slot to reserve argument slots on the stack
                                    # before the call to artReadBarrierSlow.
    addiu  $sp, $sp, 16             # restore stack after call to artReadBarrierSlow
    # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
    move \rDest, $v0                # save return value in rDest
                                    # (rDest cannot be v0 in art_quick_aput_obj)

    lw     $a0, 0($sp)              # restore registers except rDest
                                    # (rDest can only be t0 or t1 in art_quick_aput_obj)
    .cfi_restore 4
    lw     $a1, 4($sp)
    .cfi_restore 5
    lw     $a2, 8($sp)
    .cfi_restore 6
    POP_REG_NE $t0, 8, 16, \rDest   # skip reload if rDest is $t0 (would clobber the result)
    POP_REG_NE $t1, 9, 20, \rDest   # skip reload if rDest is $t1
    lw     $t9, 24($sp)
    .cfi_restore 25
    lw     $ra, 28($sp)             # restore $ra
    .cfi_restore 31
    addiu  $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
#else
    # No read barrier configured: simple heap load plus unpoisoning.
    lw     \rDest, \offset(\rObj)
    UNPOISON_HEAP_REF \rDest
#endif  // USE_READ_BARRIER
.endm
   1479 
   1480 #ifdef USE_READ_BARRIER
   1481     .extern artReadBarrierSlow
   1482 #endif
   1483 ENTRY art_quick_aput_obj
   1484     beqz $a2, .Ldo_aput_null
   1485     nop
   1486     READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
   1487     READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
   1488     READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
   1489     bne $t1, $t0, .Lcheck_assignability  # value's type == array's component type - trivial assignability
   1490     nop
   1491 .Ldo_aput:
   1492     sll $a1, $a1, 2
   1493     add $t0, $a0, $a1
   1494     POISON_HEAP_REF $a2
   1495     sw  $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
   1496     lw  $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
   1497     srl $t1, $a0, CARD_TABLE_CARD_SHIFT
   1498     add $t1, $t1, $t0
   1499     sb  $t0, ($t1)
   1500     jalr $zero, $ra
   1501     nop
   1502 .Ldo_aput_null:
   1503     sll $a1, $a1, 2
   1504     add $t0, $a0, $a1
   1505     sw  $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
   1506     jalr $zero, $ra
   1507     nop
   1508 .Lcheck_assignability:
   1509     addiu  $sp, $sp, -32
   1510     .cfi_adjust_cfa_offset 32
   1511     sw     $ra, 28($sp)
   1512     .cfi_rel_offset 31, 28
   1513     sw     $gp, 16($sp)
   1514     sw     $t9, 12($sp)
   1515     sw     $a2, 8($sp)
   1516     sw     $a1, 4($sp)
   1517     sw     $a0, 0($sp)
   1518     move   $a1, $t1
   1519     move   $a0, $t0
   1520     la     $t9, artIsAssignableFromCode
   1521     jalr   $t9               # (Class*, Class*)
   1522     addiu  $sp, $sp, -16     # reserve argument slots on the stack
   1523     addiu  $sp, $sp, 16
   1524     lw     $ra, 28($sp)
   1525     lw     $gp, 16($sp)
   1526     lw     $t9, 12($sp)
   1527     lw     $a2, 8($sp)
   1528     lw     $a1, 4($sp)
   1529     lw     $a0, 0($sp)
   1530     addiu  $sp, 32
   1531     .cfi_adjust_cfa_offset -32
   1532     bnez   $v0, .Ldo_aput
   1533     nop
   1534     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
   1535     move $a1, $a2
   1536     la   $t9, artThrowArrayStoreException
   1537     jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
   1538     move $a2, rSELF                 # pass Thread::Current
   1539 END art_quick_aput_obj
   1540 
   1541 // Macros taking opportunity of code similarities for downcalls.
// Generates stub \name that calls \entrypoint with the incoming $a0 plus Thread::Current
// in $a1, then returns through the \return macro.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9                       # (field_idx, Thread*)
    move    $a1, rSELF                # pass Thread::Current (in branch delay slot)
    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm
   1552 
// Generates stub \name that calls \entrypoint with the incoming $a0-$a1 plus Thread::Current
// in $a2, then returns through the \return macro.
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9                       # (field_idx, Object*, Thread*) or
                                      # (field_idx, new_val, Thread*)
    move    $a2, rSELF                # pass Thread::Current (in branch delay slot)
    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm
   1564 
// Generates stub \name that calls \entrypoint with the incoming $a0-$a2 plus Thread::Current
// in $a3, then returns through the \return macro.
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9                       # (field_idx, Object*, new_val, Thread*)
    move    $a3, rSELF                # pass Thread::Current (in branch delay slot)
    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm
   1575 
// Generates stub \name that calls \entrypoint with the incoming $a0-$a3 plus Thread::Current
// in the first stack argument slot, then returns through the \return macro.
.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9                       # (field_idx, Object*, 64-bit new_val, Thread*) or
                                      # (field_idx, 64-bit new_val, Thread*)
                                      # Note that a 64-bit new_val needs to be aligned with
                                      # an even-numbered register, hence A1 may be skipped
                                      # for new_val to reside in A2-A3.
    sw      rSELF, 16($sp)            # pass Thread::Current on the stack (a0-a3 are taken;
                                      # store happens in the branch delay slot)
    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm
   1590 
   1591     /*
   1592      * Called by managed code to resolve a static/instance field and load/store a value.
   1593      */
// Static field getters: (field_idx, Thread*).
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
// Instance field getters: (field_idx, Object*, Thread*).
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
// Static field setters: (field_idx, new_val, Thread*); 64-bit values use the four-arg form.
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
// Instance field setters: (field_idx, Object*, new_val, Thread*); 64-bit values use the four-arg form.
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
   1618 
   1619 // Macro to facilitate adding new allocation entrypoints.
// Generates stub \name that calls \entrypoint($a0, Thread*) and returns via \return.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9
    move    $a1, rSELF                # pass Thread::Current (in branch delay slot)
    \return
END \name
.endm
   1630 
// Generates stub \name that calls \entrypoint($a0, $a1, Thread*) and returns via \return.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9
    move    $a2, rSELF                # pass Thread::Current (in branch delay slot)
    \return
END \name
.endm
   1641 
// Generates stub \name that calls \entrypoint($a0, $a1, $a2, Thread*) and returns via \return.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9
    move    $a3, rSELF                # pass Thread::Current (in branch delay slot)
    \return
END \name
.endm
   1652 
// Generates stub \name that calls \entrypoint($a0-$a3, Thread*), passing Thread::Current
// in the first stack argument slot, and returns via \return.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
    la      $t9, \entrypoint
    jalr    $t9
    sw      rSELF, 16($sp)            # pass Thread::Current on the stack (in branch delay slot)
    \return
END \name
.endm
   1663 
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have mips specific asm.
// (Hand-written implementations for the commented-out entry points appear further below.)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)

// Same set for the plain TLAB allocator.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
   1690 
   1691 // A hand-written override for:
   1692 //   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
   1693 //   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY_NO_GP \c_name
    # Fast path rosalloc allocation
    # a0: type
    # s1: Thread::Current
    # -----------------------------
    # t1: object size
    # t2: rosalloc run
    # t3: thread stack top offset
    # t4: thread stack bottom offset
    # v0: free list head
    #
    # t5, t6 : temps
    lw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)        # Check if thread local allocation
    lw    $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)        # stack has any room left.
    bgeu  $t3, $t4, .Lslow_path_\c_name

    lw    $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load object size (t1).
    li    $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE          # Check if size is for a thread local
                                                               # allocation. Also does the
                                                               # initialized and finalizable checks.
    # When isInitialized == 0, then the class is potentially not yet initialized.
    # If the class is not yet initialized, the object size will be very large to force the branch
    # below to be taken.
    #
    # See InitializeClassVisitors in class-inl.h for more details.
    bgtu  $t1, $t5, .Lslow_path_\c_name

    # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
    # combine the two shifts together.
    srl   $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)

    addu  $t2, $t1, $s1
    lw    $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2)  # Load rosalloc run (t2).

    # Load the free list head (v0).
    # NOTE: this will be the return val.
    lw    $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    beqz  $v0, .Lslow_path_\c_name                             # Empty free list => slow path.
    nop

    # Load the next pointer of the head and update the list head with the next pointer.
    lw    $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)

    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
    # asserted to match.

#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif

    POISON_HEAP_REF $a0
    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)

    # Push the new object onto the thread local allocation stack and increment the thread local
    # allocation stack top.
    sw    $v0, 0($t3)
    addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
    sw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)

    # Decrement the size of the free list.
    lw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    addiu $t5, $t5, -1
    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)

.if \isInitialized == 0
    # This barrier is only necessary when the allocation also requires a class initialization check.
    #
    # If the class is already observably initialized, then new-instance allocations are protected
    # from publishing by the compiler which inserts its own StoreStore barrier.
    sync                                                          # Fence.
.endif
    jalr  $zero, $ra                                              # Return new object in $v0.
    nop

  .Lslow_path_\c_name:
    # Set $t9 to the slow-path label address, then establish $gp via .cpload
    # (presumably needed because ENTRY_NO_GP skipped the GP setup — confirm in asm_support_mips.S).
    addiu $t9, $t9, (.Lslow_path_\c_name - \c_name) + 4
    .cpload $t9
    SETUP_SAVE_REFS_ONLY_FRAME
    la    $t9, \cxx_name
    jalr  $t9                                                     # (mirror::Class*, Thread*)
    move  $a1, $s1                                                # Pass self as argument.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \c_name
.endm
   1780 
// Instantiate the RosAlloc fast path for the resolved and the initialized variants.
ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
   1783 
   1784 // The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
   1785 // and art_quick_alloc_object_resolved/initialized_region_tlab.
   1786 //
   1787 // a0: type, s1(rSELF): Thread::Current.
   1788 // Need to preserve a0 to the slow path.
   1789 //
   1790 // If isInitialized=1 then the compiler assumes the object's class has already been initialized.
   1791 // If isInitialized=0 the compiler can only assume it's been at least resolved.
.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
    # Bump-pointer TLAB allocation: falls through with the new object in $v0 on success,
    # branches to \slowPathLabel when the object does not fit in the remaining TLAB space.
    lw    $v0, THREAD_LOCAL_POS_OFFSET(rSELF)          # Load thread_local_pos.
    lw    $a2, THREAD_LOCAL_END_OFFSET(rSELF)          # Load thread_local_end.
    subu  $a3, $a2, $v0                                # Compute the remaining buffer size.
    lw    $t0, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load the object size.

    # When isInitialized == 0, then the class is potentially not yet initialized.
    # If the class is not yet initialized, the object size will be very large to force the branch
    # below to be taken.
    #
    # See InitializeClassVisitors in class-inl.h for more details.
    bgtu  $t0, $a3, \slowPathLabel                     # Check if it fits.
    addu  $t1, $v0, $t0                                # Add object size to tlab pos (in branch
                                                       # delay slot).
    # "Point of no slow path". Won't go to the slow path from here on.
    sw    $t1, THREAD_LOCAL_POS_OFFSET(rSELF)          # Store new thread_local_pos.
    lw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)      # Increment thread_local_objects.
    addiu $a2, $a2, 1
    sw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
    POISON_HEAP_REF $a0
    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)         # Store the class pointer.

.if \isInitialized == 0
    # This barrier is only necessary when the allocation also requires a class initialization check.
    #
    # If the class is already observably initialized, then new-instance allocations are protected
    # from publishing by the compiler which inserts its own StoreStore barrier.
    sync                                               # Fence.
.endif
    jalr  $zero, $ra                                   # Return new object in $v0.
    nop
.endm
   1824 
   1825 // The common code for art_quick_alloc_object_resolved/initialized_tlab
   1826 // and art_quick_alloc_object_resolved/initialized_region_tlab.
.macro GENERATE_ALLOC_OBJECT_TLAB name, entrypoint, isInitialized
ENTRY_NO_GP \name
    # Fast path tlab allocation.
    # a0: type, s1(rSELF): Thread::Current.
    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path_\name, \isInitialized
.Lslow_path_\name:
    # Set $t9 to the slow-path label address, then establish $gp via .cpload
    # (presumably needed because ENTRY_NO_GP skipped the GP setup — confirm in asm_support_mips.S).
    addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
    .cpload $t9
    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
    la    $t9, \entrypoint
    jalr  $t9                                          # (mirror::Class*, Thread*)
    move  $a1, rSELF                                   # Pass Thread::Current.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
   1842 
// Instantiate the object TLAB fast paths for the RegionTLAB and plain TLAB allocators.
GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
   1847 
   1848 // The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
   1849 // and art_quick_alloc_array_resolved/initialized_region_tlab.
   1850 //
   1851 // a0: type, a1: component_count, a2: total_size, s1(rSELF): Thread::Current.
   1852 // Need to preserve a0 and a1 to the slow path.
.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
    # Bump-pointer TLAB array allocation; $a2 holds the (pre-bumped) total size in bytes.
    li    $a3, OBJECT_ALIGNMENT_MASK_TOGGLED           # Apply alignment mask
    and   $a2, $a2, $a3                                # (addr + 7) & ~7.

    lw    $v0, THREAD_LOCAL_POS_OFFSET(rSELF)          # Load thread_local_pos.
    lw    $t1, THREAD_LOCAL_END_OFFSET(rSELF)          # Load thread_local_end.
    subu  $t2, $t1, $v0                                # Compute the remaining buffer size.
    bgtu  $a2, $t2, \slowPathLabel                     # Check if it fits.
    addu  $a2, $v0, $a2                                # Add object size to tlab pos (in branch
                                                       # delay slot).

    # "Point of no slow path". Won't go to the slow path from here on.
    sw    $a2, THREAD_LOCAL_POS_OFFSET(rSELF)          # Store new thread_local_pos.
    lw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)      # Increment thread_local_objects.
    addiu $a2, $a2, 1
    sw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
    POISON_HEAP_REF $a0
    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)         # Store the class pointer.
    jalr  $zero, $ra                                   # Return new array in $v0.
    sw    $a1, MIRROR_ARRAY_LENGTH_OFFSET($v0)         # Store the array length (in delay slot).
.endm
   1874 
.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
ENTRY_NO_GP \name
    # Fast path array allocation for region tlab allocation.
    # a0: mirror::Class* type
    # a1: int32_t component_count
    # s1(rSELF): Thread::Current
    \size_setup .Lslow_path_\name                      # Computes total size into $a2 or bails out.
    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path_\name
.Lslow_path_\name:
    # a0: mirror::Class* type
    # a1: int32_t component_count
    # a2: Thread* self
    # Set $t9 to the slow-path label address, then establish $gp via .cpload
    # (presumably needed because ENTRY_NO_GP skipped the GP setup — confirm in asm_support_mips.S).
    addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
    .cpload $t9
    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
    la    $t9, \entrypoint
    jalr  $t9
    move  $a2, rSELF                                   # Pass Thread::Current.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
   1896 
.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
    break                                              # We should never enter here.
                                                       # Code below is for reference.
                                                       # Possibly a large object, go slow.
                                                       # Also does negative array size check.
    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
    bgtu  $a1, $a2, \slow_path
                                                       # Array classes are never finalizable
                                                       # or uninitialized, no need to check.
    lw    $a3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($a0) # Load component type.
    UNPOISON_HEAP_REF $a3
    lw    $a3, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET($a3)
    srl   $a3, $a3, PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT    # Component size shift is in high 16 bits.
    sllv  $a2, $a1, $a3                                # Calculate data size.
                                                       # Add array data offset and alignment.
    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
#error Long array data offset must be 4 greater than int array data offset.
#endif

    # $a3 is the element-size shift (0..3): ($a3 + 1) & 4 is 4 exactly when the shift is 3,
    # i.e. only 64-bit element arrays get the extra 4 bytes for 8-byte data alignment.
    addiu $a3, $a3, 1                                  # Add 4 to the length only if the component
    andi  $a3, $a3, 4                                  # size shift is 3 (for 64 bit alignment).
    addu  $a2, $a2, $a3
.endm
   1921 
.macro COMPUTE_ARRAY_SIZE_8 slow_path
    # Total size for an 8-bit element array: count ($a1) plus header and alignment slack,
    # left in $a2 (rounded down later by the caller's alignment mask).
    # Possibly a large object, go slow.
    # Also does negative array size check.
    li    $a2, (MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
    bgtu  $a1, $a2, \slow_path
    # Add array data offset and alignment (in branch delay slot).
    addiu $a2, $a1, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm
   1930 
.macro COMPUTE_ARRAY_SIZE_16 slow_path
    # Total size for a 16-bit element array: count ($a1) * 2 plus header and alignment slack,
    # left in $a2.
    # Possibly a large object, go slow.
    # Also does negative array size check.
    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
    bgtu  $a1, $a2, \slow_path
    sll   $a2, $a1, 1                # data size = count * 2 (in branch delay slot)
    # Add array data offset and alignment.
    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm
   1940 
.macro COMPUTE_ARRAY_SIZE_32 slow_path
    # Total size for a 32-bit element array: count ($a1) * 4 plus header and alignment slack,
    # left in $a2.
    # Possibly a large object, go slow.
    # Also does negative array size check.
    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
    bgtu  $a1, $a2, \slow_path
    sll   $a2, $a1, 2                # data size = count * 4 (in branch delay slot)
    # Add array data offset and alignment.
    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm
   1950 
   1951 .macro COMPUTE_ARRAY_SIZE_64 slow_path
   1952     # Possibly a large object, go slow.
   1953     # Also does negative array size check.
   1954     li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
   1955     bgtu  $a1, $a2, \slow_path
   1956     sll   $a2, $a1, 3
   1957     # Add array data offset and alignment.
   1958     addiu $a2, $a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
   1959 .endm
   1960 
// Instantiate the array TLAB fast paths for each element width, RegionTLAB allocator.
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64

// Same set for the plain TLAB allocator.
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
   1972 
// Macro for string and type resolution and initialization.
// $a0 is both input (the uint32_t index) and output (the result object).
// Calls \entrypoint(index, Thread*); a null result ($v0 == 0) means an
// exception is pending and is delivered, otherwise the result is returned.
.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint
    .extern \entrypoint
ENTRY_NO_GP \name
    SETUP_SAVE_EVERYTHING_FRAME       # Save everything in case of GC.
    move    $s2, $gp                  # Preserve $gp across the call for exception delivery.
    la      $t9, \entrypoint
    jalr    $t9                       # (uint32_t index, Thread*)
    move    $a1, rSELF                # Pass Thread::Current (in delay slot).
    beqz    $v0, 1f                   # Success?
    move    $a0, $v0                  # Move result to $a0 (in delay slot, runs on both paths).
    RESTORE_SAVE_EVERYTHING_FRAME 0   # Restore everything except $a0.
    jalr    $zero, $ra                # Return on success.
    nop
1:
    move    $gp, $s2                  # Restore $gp saved above for exception delivery.
    DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
.endm
   1993 
    /*
     * Entry from managed code to resolve a string. This stub will allocate a String and deliver an
     * exception on error. On success the String is returned. A0 holds the string index. The fast
     * path check for hit in strings cache has already been performed.
     */
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode

    /*
     * Entry from managed code when static storage is uninitialized. This stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode

    /*
     * Entry from managed code when the dex cache misses for a type_idx.
     */
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode

    /*
     * Entry from managed code when type_idx needs to be checked for access and the dex cache may
     * also miss.
     */
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
   2018 
    /*
     * Called by managed code when the value in rSUSPEND has been decremented to 0.
     * Reloads the thread flags; if no flag is set, resets rSUSPEND and returns
     * immediately. Otherwise saves everything and calls
     * artTestSuspendFromCode(Thread*) for the full suspend check.
     */
    .extern artTestSuspendFromCode
ENTRY_NO_GP art_quick_test_suspend
    lh     rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)      # reload thread flags
    bnez   rSUSPEND, 1f                              # any flag set -> slow path
    addiu  rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL   # reset rSUSPEND to SUSPEND_CHECK_INTERVAL (in delay slot)
    jalr   $zero, $ra                                # fast path: return
    nop
1:
    SETUP_SAVE_EVERYTHING_FRAME                      # save everything for stack crawl
    la     $t9, artTestSuspendFromCode
    jalr   $t9                                       # (Thread*)
    move   $a0, rSELF                                # pass Thread::Current (in delay slot)
    RESTORE_SAVE_EVERYTHING_FRAME
    jalr   $zero, $ra
    nop
END art_quick_test_suspend
   2038 
    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
     * On exit the result is in $v0/$v1 (with the FP view set up by MTD), unless an
     * exception is pending, in which case it is delivered instead of returning.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    move    $a2, rSELF                  # pass Thread::Current
    la      $t9, artQuickProxyInvokeHandler
    jalr    $t9                         # (Method* proxy method, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE     # pass $sp (remove arg slots) (in delay slot)
    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez    $t7, 1f                     # non-null exception_ -> deliver it
    # don't care if $v0 and/or $v1 are modified, when exception branch taken
    MTD     $v0, $v1, $f0, $f1          # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
   2060 
    /*
     * Called to resolve an imt conflict.
     * a0 is the conflict ArtMethod.
     * t7 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to a0, t7 and t8.
     */
ENTRY art_quick_imt_conflict_trampoline
// FIXME: The DexCache method array has been changed to hash-based cache with eviction.
// We need a relaxed atomic load of a 64-bit location to try and load the method
// and call artQuickResolutionTrampoline() if the index does not match.
// Until then the ImtConflictTable fast path below is disabled (#if 0) and the
// stub always falls back to artInvokeInterfaceTrampoline with a null method.
#if 0
    lw      $t8, 0($sp)                                      # Load referrer.
    lw      $t8, ART_METHOD_DEX_CACHE_METHODS_OFFSET_32($t8) # Load dex cache methods array.
    sll     $t7, $t7, POINTER_SIZE_SHIFT                     # Calculate offset.
    addu    $t7, $t8, $t7                                    # Add offset to base.
    lw      $t7, 0($t7)                                      # Load interface method.
    lw      $a0, ART_METHOD_JNI_OFFSET_32($a0)               # Load ImtConflictTable.

.Limt_table_iterate:
    lw      $t8, 0($a0)                                      # Load next entry in ImtConflictTable.
    # Branch if found.
    beq     $t8, $t7, .Limt_table_found
    nop
    # If the entry is null, the interface method is not in the ImtConflictTable.
    beqz    $t8, .Lconflict_trampoline
    nop
    # Iterate over the entries of the ImtConflictTable.
    b       .Limt_table_iterate
    addiu   $a0, $a0, 2 * __SIZEOF_POINTER__                 # Iterate to the next entry (in delay slot).

.Limt_table_found:
    # We successfully hit an entry in the table. Load the target method and jump to it.
    lw      $a0, __SIZEOF_POINTER__($a0)
    lw      $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)
    jalr    $zero, $t9
    nop

.Lconflict_trampoline:
    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
    move    $a0, $t7                                         # Load interface method.
#else
    move   $a0, $zero                                        # Pass null method; resolve via trampoline.
#endif
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
   2107 
    .extern artQuickResolutionTrampoline
// Resolves the called method. On success, tail-calls the returned code pointer
// (which must travel via $t9 so the callee can rebuild $gp); on failure
// ($v0 == 0) delivers the pending exception.
ENTRY art_quick_resolution_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move    $a2, rSELF                    # pass Thread::Current
    la      $t9, artQuickResolutionTrampoline
    jalr    $t9                           # (Method* called, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE       # pass $sp (remove arg slots) (in delay slot)
    beqz    $v0, 1f                       # null code pointer -> exception pending
    lw      $a0, ARG_SLOT_SIZE($sp)       # load resolved method to $a0 (in delay slot)
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    move    $t9, $v0               # code pointer must be in $t9 to generate the global pointer
    jalr    $zero, $t9             # tail call to method
    nop
1:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
   2125 
    .extern artQuickGenericJniTrampoline
    .extern artQuickGenericJniEndTrampoline
// Generic JNI trampoline: sets up the JNI call via artQuickGenericJniTrampoline,
// invokes the native code, then finishes via artQuickGenericJniEndTrampoline.
// $s8 preserves the original $sp (alloca teardown), $s3 preserves $gp.
ENTRY art_quick_generic_jni_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    move    $s8, $sp               # save $sp to $s8
    move    $s3, $gp               # save $gp to $s3

    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
    move    $a0, rSELF                     # pass Thread::Current
    addiu   $a1, $sp, ARG_SLOT_SIZE        # save $sp (remove arg slots)
    la      $t9, artQuickGenericJniTrampoline
    jalr    $t9                            # (Thread*, SP)
    addiu   $sp, $sp, -5120                # reserve space on the stack (in delay slot)

    # The C call will have registered the complete save-frame on success.
    # The result of the call is:
    # v0: ptr to native code, 0 on error.
    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
    beq     $v0, $zero, 2f         # check entry error
    move    $t9, $v0               # save the code ptr (in delay slot)
    move    $sp, $v1               # release part of the alloca

    # Load parameters from stack into registers
    lw      $a0,   0($sp)
    lw      $a1,   4($sp)
    lw      $a2,   8($sp)
    lw      $a3,  12($sp)

    # artQuickGenericJniTrampoline sets bit 0 of the native code address to 1
    # when the first two arguments are both single precision floats. This lets
    # us extract them properly from the stack and load into floating point
    # registers.
    MTD     $a0, $a1, $f12, $f13
    andi    $t0, $t9, 1            # $t0 = both-floats flag (bit 0 of code address)
    xor     $t9, $t9, $t0          # clear the flag bit to get the real code address
    bnez    $t0, 1f
    mtc1    $a1, $f14              # (delay slot, runs on both paths; overwritten by MTD below if not taken)
    MTD     $a2, $a3, $f14, $f15

1:
    jalr    $t9                    # native call
    nop
    addiu   $sp, $sp, 16           # remove arg slots

    move    $gp, $s3               # restore $gp from $s3

    # result sign extension is handled in C code
    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
    move    $a0, rSELF             # pass Thread::Current
    move    $a2, $v0               # pass result
    move    $a3, $v1
    addiu   $sp, $sp, -24          # reserve arg slots
    la      $t9, artQuickGenericJniEndTrampoline
    jalr    $t9
    s.d     $f0, 16($sp)           # pass result_f (in delay slot)

    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    bne     $t0, $zero, 2f         # check for pending exceptions

    move    $sp, $s8               # tear down the alloca (delay slot of the bne; harmless if taken)

    # tear down the callee-save frame
    RESTORE_SAVE_REFS_AND_ARGS_FRAME

    MTD     $v0, $v1, $f0, $f1     # move float value to return value
    jalr    $zero, $ra
    nop

2:
    lw      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
    move    $gp, $s3               # restore $gp from $s3
    # This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
   2200 
    .extern artQuickToInterpreterBridge
// Bridges a quick-code call into the interpreter. On return, passes through
// the result in $v0/$v1 (FP view via MTD) or delivers the pending exception.
ENTRY art_quick_to_interpreter_bridge
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move    $a1, rSELF                          # pass Thread::Current
    la      $t9, artQuickToInterpreterBridge
    jalr    $t9                                 # (Method* method, Thread*, SP)
    addiu   $a2, $sp, ARG_SLOT_SIZE             # pass $sp (remove arg slots) (in delay slot)
    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez    $t7, 1f                             # non-null exception_ -> deliver it
    # don't care if $v0 and/or $v1 are modified, when exception branch taken
    MTD     $v0, $v1, $f0, $f1                  # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
   2218 
    .extern artInvokeObsoleteMethod
// Stub entered when invoking an obsolete method.
// NOTE(review): no return sequence follows the call, so the callee is
// presumably expected not to return here — confirm against the runtime side.
ENTRY art_invoke_obsolete_method_stub
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la      $t9, artInvokeObsoleteMethod
    jalr    $t9                                 # (Method* method, Thread* self)
    move    $a1, rSELF                          # pass Thread::Current (in delay slot)
END art_invoke_obsolete_method_stub
   2226 
    /*
     * Routine that intercepts method calls and returns.
     * Entry half: notifies the runtime of the call; on success jumps to the
     * code returned in $v0, on failure (null $v0) delivers the exception via
     * the shared tail after art_quick_instrumentation_exit below.
     */
    .extern artInstrumentationMethodEntryFromCode
    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    sw       $a0, 28($sp)   # save arg0 (method) in free arg slot
    addiu    $a3, $sp, ARG_SLOT_SIZE      # Pass $sp.
    la       $t9, artInstrumentationMethodEntryFromCode
    jalr     $t9            # (Method*, Object*, Thread*, SP)
    move     $a2, rSELF     # pass Thread::Current (in delay slot)
    beqz     $v0, .Ldeliver_instrumentation_entry_exception
    move     $t9, $v0       # $t9 holds reference to code (in delay slot)
    lw       $a0, 28($sp)   # restore arg0 from free arg slot
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    jalr     $t9            # call method
    nop
END art_quick_instrumentation_entry
    /* intentional fallthrough from art_quick_instrumentation_entry */
    # Exit half: called as the fake return address of an instrumented method.
    # Preserves the method's return values ($v0/$v1/$f0) across the runtime
    # call, then returns to the link register the runtime hands back in $v0
    # (with $v1 as the deoptimization link register).
    .global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
    .cfi_startproc
    addiu    $t9, $ra, 4    # put current address into $t9 to rebuild $gp
    .cpload  $t9
    move     $ra, $zero     # link register is to here, so clobber with 0 for later checks

    SETUP_SAVE_REFS_ONLY_FRAME
    addiu    $sp, $sp, -16  # allocate temp storage on the stack for $v0/$v1/$f0
    .cfi_adjust_cfa_offset 16
    sw       $v0, ARG_SLOT_SIZE+8($sp)
    .cfi_rel_offset 2, ARG_SLOT_SIZE+8
    sw       $v1, ARG_SLOT_SIZE+12($sp)
    .cfi_rel_offset 3, ARG_SLOT_SIZE+12
    s.d      $f0, ARG_SLOT_SIZE($sp)
    addiu    $a3, $sp, ARG_SLOT_SIZE      # Pass fpr_res pointer.
    addiu    $a2, $sp, ARG_SLOT_SIZE+8    # Pass gpr_res pointer.
    addiu    $a1, $sp, ARG_SLOT_SIZE+16   # Pass $sp (remove arg slots and temp storage).
    la       $t9, artInstrumentationMethodExitFromCode
    jalr     $t9                          # (Thread*, SP, gpr_res*, fpr_res*)
    move     $a0, rSELF                   # Pass Thread::Current (in delay slot).
    move     $t9, $v0                     # Set aside returned link register.
    move     $ra, $v1                     # Set link register for deoptimization.
    lw       $v0, ARG_SLOT_SIZE+8($sp)    # Restore return values.
    lw       $v1, ARG_SLOT_SIZE+12($sp)
    l.d      $f0, ARG_SLOT_SIZE($sp)
    addiu    $sp, $sp, 16                 # release temp storage
    .cfi_adjust_cfa_offset -16
    RESTORE_SAVE_REFS_ONLY_FRAME
    beqz     $t9, .Ldo_deliver_instrumentation_exception
    nop                     # Deliver exception if we got nullptr as function.
    jalr     $zero, $t9     # Otherwise, return.
    nop
.Ldeliver_instrumentation_entry_exception:
    # Deliver exception for art_quick_instrumentation_entry placed after
    # art_quick_instrumentation_exit so that the fallthrough works.
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
.Ldo_deliver_instrumentation_exception:
    DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_exit
   2287 
    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1, so this stub does not
     * return through $ra.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la       $t9, artDeoptimize
    jalr     $t9            # (Thread*)
    move     $a0, rSELF     # pass Thread::current (in delay slot)
END art_quick_deoptimize
   2299 
    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     * $a0 (the DeoptimizationKind) is supplied by the caller; only $a1 is set here.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_EVERYTHING_FRAME
    la       $t9, artDeoptimizeFromCompiledCode
    jalr     $t9                            # (DeoptimizationKind, Thread*)
    move     $a1, rSELF                     # pass Thread::current (in delay slot)
END art_quick_deoptimize_from_compiled_code
   2311 
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: $v0 = low word, $v1 = high word.
     */
ENTRY_NO_GP art_quick_shl_long
    /* shl-long vAA, vBB, vCC */
    sll     $v0, $a0, $a2                    #  rlo<- alo << (shift&31)
    not     $v1, $a2                         #  rhi<- 31-shift  (shift is 5b)
    # The >>(32-(shift&31)) is done in two steps (>>1 then >>(31-(shift&31)))
    # because a single variable shift only uses the low 5 bits of the count.
    srl     $a0, 1
    srl     $a0, $v1                         #  alo<- alo >> (32-(shift&31))
    sll     $v1, $a1, $a2                    #  rhi<- ahi << (shift&31)
    andi    $a2, 0x20                        #  shift< shift & 0x20
    beqz    $a2, 1f
    or      $v1, $a0                         #  rhi<- rhi | alo (in delay slot)

    # shift >= 32: the low word becomes the high word.
    move    $v1, $v0                         #  rhi<- rlo (if shift&0x20)
    move    $v0, $zero                       #  rlo<- 0 (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_shl_long
   2339 
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: $v0 = low word, $v1 = high word (arithmetic/sign-propagating).
     */
ENTRY_NO_GP art_quick_shr_long
    sra     $v1, $a1, $a2                    #  rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2                    #  rlo<- alo >> (shift&31)
    sra     $a3, $a1, 31                     #  $a3<- sign(ah)
    not     $a0, $a2                         #  alo<- 31-shift (shift is 5b)
    # Two-step <<(32-(shift&31)): a single variable shift only uses the
    # low 5 bits of the count.
    sll     $a1, 1
    sll     $a1, $a0                         #  ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                        #  shift & 0x20
    beqz    $a2, 1f
    or      $v0, $a1                         #  rlo<- rlo | ahi (in delay slot)

    # shift >= 32: high word shifts into the low word; high word is the sign fill.
    move    $v0, $v1                         #  rlo<- rhi (if shift&0x20)
    move    $v1, $a3                         #  rhi<- sign(ahi) (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_shr_long
   2367 
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: $v0 = low word, $v1 = high word (logical/zero-filling).
     */
    /* ushr-long vAA, vBB, vCC */
ENTRY_NO_GP art_quick_ushr_long
    srl     $v1, $a1, $a2                    #  rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2                    #  rlo<- alo >> (shift&31)
    not     $a0, $a2                         #  alo<- 31-shift (shift is 5b)
    # Two-step <<(32-(shift&31)): a single variable shift only uses the
    # low 5 bits of the count.
    sll     $a1, 1
    sll     $a1, $a0                         #  ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                        #  shift & 0x20
    beqz    $a2, 1f
    or      $v0, $a1                         #  rlo<- rlo | ahi (in delay slot)

    # shift >= 32: high word shifts into the low word; high word is zero-filled.
    move    $v0, $v1                         #  rlo<- rhi (if shift&0x20)
    move    $v1, $zero                       #  rhi<- 0 (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_ushr_long
   2395 
/* java.lang.String.indexOf(int ch, int fromIndex=0) */
ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
/* $a1 holds "ch" */
/* $a2 holds "fromIndex" */
/* Returns the index of the first occurrence in $v0, or -1 if not found. */
#if (STRING_COMPRESSION_FEATURE)
    lw    $a3, MIRROR_STRING_COUNT_OFFSET($a0)    # 'count' field of this (length | compression flag)
#else
    lw    $t0, MIRROR_STRING_COUNT_OFFSET($a0)    # this.length()
#endif
    slt   $t1, $a2, $zero # if fromIndex < 0
#if defined(_MIPS_ARCH_MIPS32R6)
    seleqz $a2, $a2, $t1  #     fromIndex = 0;  (R6 conditional select)
#else
    movn   $a2, $zero, $t1 #    fromIndex = 0;  (pre-R6 conditional move)
#endif

#if (STRING_COMPRESSION_FEATURE)
    srl   $t0, $a3, 1     # $a3 holds count (with flag) and $t0 holds actual length
#endif
    subu  $t0, $t0, $a2   # this.length() - fromIndex
    blez  $t0, 6f         # if this.length()-fromIndex <= 0
    li    $v0, -1         #     return -1; (set in delay slot)

#if (STRING_COMPRESSION_FEATURE)
    sll   $a3, $a3, 31    # Extract compression flag (now in the sign bit).
    beqz  $a3, .Lstring_indexof_compressed
    move  $t2, $a0        # Save a copy in $t2 to later compute result (in branch delay slot).
#endif
    # Uncompressed path: 16-bit chars.
    sll   $v0, $a2, 1     # $a0 += $a2 * 2
    addu  $a0, $a0, $v0   #  "  ditto  "
    move  $v0, $a2        # Set i to fromIndex.

1:
    lhu   $t3, MIRROR_STRING_VALUE_OFFSET($a0)    # if this.charAt(i) == ch
    beq   $t3, $a1, 6f                            #     return i;
    addu  $a0, $a0, 2     # i++ (char pointer, in delay slot)
    subu  $t0, $t0, 1     # this.length() - i
    bnez  $t0, 1b         # while this.length() - i > 0
    addu  $v0, $v0, 1     # i++ (index, in delay slot)

    li    $v0, -1         # if this.length() - i <= 0
                          #     return -1;

6:
    j     $ra
    nop

#if (STRING_COMPRESSION_FEATURE)
.Lstring_indexof_compressed:
    # Compressed path: 8-bit chars; $t2 still holds the original $a0.
    addu  $a0, $a0, $a2   # $a0 += $a2

.Lstring_indexof_compressed_loop:
    lbu   $t3, MIRROR_STRING_VALUE_OFFSET($a0)
    beq   $t3, $a1, .Lstring_indexof_compressed_matched
    subu  $t0, $t0, 1     # (in delay slot; harmless when the branch is taken)
    bgtz  $t0, .Lstring_indexof_compressed_loop
    addu  $a0, $a0, 1     # advance byte pointer (in delay slot)

.Lstring_indexof_nomatch:
    jalr  $zero, $ra
    li    $v0, -1         # return -1; (in delay slot)

.Lstring_indexof_compressed_matched:
    jalr  $zero, $ra
    subu  $v0, $a0, $t2   # return (current - start); (in delay slot)
#endif
END art_quick_indexof
   2464 
/* java.lang.String.compareTo(String anotherString) */
ENTRY_NO_GP art_quick_string_compareto
/* $a0 holds address of "this" */
/* $a1 holds address of "anotherString" */
/* Returns the comparison result in $v0: first differing char difference, */
/* or the length difference when one string is a prefix of the other.     */
    beq    $a0, $a1, .Lstring_compareto_length_diff   # this and anotherString are the same object
    move   $a3, $a2                                   # trick to return 0 (it returns a2 - a3) (in delay slot)

#if (STRING_COMPRESSION_FEATURE)
    lw     $t0, MIRROR_STRING_COUNT_OFFSET($a0)   # 'count' field of this (length | compression flag)
    lw     $t1, MIRROR_STRING_COUNT_OFFSET($a1)   # 'count' field of anotherString
    sra    $a2, $t0, 1                            # this.length()
    sra    $a3, $t1, 1                            # anotherString.length()
#else
    lw     $a2, MIRROR_STRING_COUNT_OFFSET($a0)   # this.length()
    lw     $a3, MIRROR_STRING_COUNT_OFFSET($a1)   # anotherString.length()
#endif

    MINu   $t2, $a2, $a3
    # $t2 now holds min(this.length(),anotherString.length())

    # while min(this.length(),anotherString.length())-i != 0
    beqz   $t2, .Lstring_compareto_length_diff # if $t2==0
    nop                                        #     return (this.length() - anotherString.length())

#if (STRING_COMPRESSION_FEATURE)
    # Dispatch on the compression flags (bit 0 of each 'count', moved to the
    # sign bit) of both strings; four layout combinations in total.
    sll    $t3, $t0, 31                           # this' compression flag in the sign bit.
    beqz   $t3, .Lstring_compareto_this_is_compressed
    sll    $t3, $t1, 31                           # anotherString's flag (in branch delay slot).
    beqz   $t3, .Lstring_compareto_that_is_compressed
    nop
    b      .Lstring_compareto_both_not_compressed
    nop

.Lstring_compareto_this_is_compressed:
    # $t3 still holds anotherString's compression flag (set in the delay slot above).
    beqz   $t3, .Lstring_compareto_both_compressed
    nop
    /* If (this->IsCompressed() && that->IsCompressed() == false) */
.Lstring_compareto_loop_comparison_this_compressed:
    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne    $t0, $t1, .Lstring_compareto_char_diff
    addiu  $a0, $a0, 1    # point at this.charAt(i++) - compressed (in delay slot)
    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
    bnez   $t2, .Lstring_compareto_loop_comparison_this_compressed
    addiu  $a1, $a1, 2    # point at anotherString.charAt(i++) - uncompressed (in delay slot)
    jalr   $zero, $ra
    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length()) (in delay slot)

.Lstring_compareto_that_is_compressed:
    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne    $t0, $t1, .Lstring_compareto_char_diff
    addiu  $a0, $a0, 2    # point at this.charAt(i++) - uncompressed (in delay slot)
    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
    bnez   $t2, .Lstring_compareto_that_is_compressed
    addiu  $a1, $a1, 1    # point at anotherString.charAt(i++) - compressed (in delay slot)
    jalr   $zero, $ra
    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length()) (in delay slot)

.Lstring_compareto_both_compressed:
    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne    $t0, $t1, .Lstring_compareto_char_diff
    addiu  $a0, $a0, 1    # point at this.charAt(i++) - compressed (in delay slot)
    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
    bnez   $t2, .Lstring_compareto_both_compressed
    addiu  $a1, $a1, 1    # point at anotherString.charAt(i++) - compressed (in delay slot)
    jalr   $zero, $ra
    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length()) (in delay slot)
#endif

.Lstring_compareto_both_not_compressed:
    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)   # while this.charAt(i) == anotherString.charAt(i)
    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne    $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
                          #     return (this.charAt(i) - anotherString.charAt(i))
    addiu  $a0, $a0, 2    # point at this.charAt(i++) (in delay slot)
    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
    bnez   $t2, .Lstring_compareto_both_not_compressed
    addiu  $a1, $a1, 2    # point at anotherString.charAt(i++) (in delay slot)

.Lstring_compareto_length_diff:
    jalr   $zero, $ra
    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length()) (in delay slot)

.Lstring_compareto_char_diff:
    jalr   $zero, $ra
    subu   $v0, $t0, $t1  # return (this.charAt(i) - anotherString.charAt(i)) (in delay slot)
END art_quick_string_compareto
   2555 
    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through register
     * `reg`, saving and restoring all caller-save registers.
     *
     * Fast paths (no call): null reference; lock word already has the mark
     * bit set; lock word is in the forwarding-address state (the forwarded
     * address is returned). Otherwise all caller-save GPRs and FPRs are
     * spilled and artReadBarrierMark(obj) is called.
     */
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
    // Null check so that we can load the lock word.
    bnez    \reg, .Lnot_null_\name
    nop
.Lret_rb_\name:
    jalr    $zero, $ra
    nop
.Lnot_null_\name:
    // Check lock word for mark bit, if marked return.
    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET(\reg)
    .set push
    .set noat
    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
    // Delay slot is the `sll` below the #if block; it only clobbers $at,
    // which the return path does not read.
    bltz    $at, .Lret_rb_\name
#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
    // The below code depends on the lock word state being in the highest bits
    // and the "forwarding address" state having all bits set.
#error "Unexpected lock word state shift or forwarding address state value."
#endif
    // Test that both the forwarding state bits are 1.
    sll     $at, $t9, 1
    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
    bltz    $at, .Lret_forwarding_address\name
    nop
    .set pop

    // Slow path: spill all caller-save registers and call the runtime.
    addiu   $sp, $sp, -160      # Includes 16 bytes of space for argument registers a0-a3.
    .cfi_adjust_cfa_offset 160

    sw      $ra, 156($sp)
    .cfi_rel_offset 31, 156
    sw      $t8, 152($sp)
    .cfi_rel_offset 24, 152
    sw      $t7, 148($sp)
    .cfi_rel_offset 15, 148
    sw      $t6, 144($sp)
    .cfi_rel_offset 14, 144
    sw      $t5, 140($sp)
    .cfi_rel_offset 13, 140
    sw      $t4, 136($sp)
    .cfi_rel_offset 12, 136
    sw      $t3, 132($sp)
    .cfi_rel_offset 11, 132
    sw      $t2, 128($sp)
    .cfi_rel_offset 10, 128
    sw      $t1, 124($sp)
    .cfi_rel_offset 9, 124
    sw      $t0, 120($sp)
    .cfi_rel_offset 8, 120
    sw      $a3, 116($sp)
    .cfi_rel_offset 7, 116
    sw      $a2, 112($sp)
    .cfi_rel_offset 6, 112
    sw      $a1, 108($sp)
    .cfi_rel_offset 5, 108
    sw      $a0, 104($sp)
    .cfi_rel_offset 4, 104
    sw      $v1, 100($sp)
    .cfi_rel_offset 3, 100
    sw      $v0, 96($sp)
    .cfi_rel_offset 2, 96

    la      $t9, artReadBarrierMark

    // Spill caller-save FP register pairs.
    sdc1    $f18, 88($sp)
    sdc1    $f16, 80($sp)
    sdc1    $f14, 72($sp)
    sdc1    $f12, 64($sp)
    sdc1    $f10, 56($sp)
    sdc1    $f8,  48($sp)
    sdc1    $f6,  40($sp)
    sdc1    $f4,  32($sp)
    sdc1    $f2,  24($sp)

    .ifnc \reg, $a0
      move  $a0, \reg           # pass obj from `reg` in a0
    .endif
    jalr    $t9                 # v0 <- artReadBarrierMark(obj)
    sdc1    $f0,  16($sp)       # in delay slot

    lw      $ra, 156($sp)
    .cfi_restore 31
    lw      $t8, 152($sp)
    .cfi_restore 24
    lw      $t7, 148($sp)
    .cfi_restore 15
    lw      $t6, 144($sp)
    .cfi_restore 14
    lw      $t5, 140($sp)
    .cfi_restore 13
    lw      $t4, 136($sp)
    .cfi_restore 12
    lw      $t3, 132($sp)
    .cfi_restore 11
    lw      $t2, 128($sp)
    .cfi_restore 10
    lw      $t1, 124($sp)
    .cfi_restore 9
    lw      $t0, 120($sp)
    .cfi_restore 8
    lw      $a3, 116($sp)
    .cfi_restore 7
    lw      $a2, 112($sp)
    .cfi_restore 6
    lw      $a1, 108($sp)
    .cfi_restore 5
    lw      $a0, 104($sp)
    .cfi_restore 4
    lw      $v1, 100($sp)
    .cfi_restore 3

    // When `reg` is $v0 itself, keep the result in $v0 instead of
    // restoring the spilled value.
    .ifnc \reg, $v0
      move  \reg, $v0           # `reg` <- v0
      lw    $v0, 96($sp)
      .cfi_restore 2
    .endif

    ldc1    $f18, 88($sp)
    ldc1    $f16, 80($sp)
    ldc1    $f14, 72($sp)
    ldc1    $f12, 64($sp)
    ldc1    $f10, 56($sp)
    ldc1    $f8,  48($sp)
    ldc1    $f6,  40($sp)
    ldc1    $f4,  32($sp)
    ldc1    $f2,  24($sp)
    ldc1    $f0,  16($sp)

    jalr    $zero, $ra
    addiu   $sp, $sp, 160       # release frame (in delay slot)
    .cfi_adjust_cfa_offset -160

.Lret_forwarding_address\name:
    jalr    $zero, $ra
    // Shift left by the forwarding address shift. This clears out the state bits since they are
    // in the top 2 bits of the lock word.
    sll     \reg, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
END \name
.endm
   2701 
// Instantiate one read-barrier mark entrypoint per general-purpose register
// that can hold a reference. Each stub receives the candidate reference in
// its dedicated register and returns the marked reference in that same
// register (see the READ_BARRIER_MARK_REG macro above, which moves the
// artReadBarrierMark result back into \reg before returning).
//
// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
// ZERO (register 0) is reserved.
// AT (register 1) is reserved as a temporary/scratch register.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
// K0, K1, GP, SP (registers 26 - 29) are reserved.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
// RA (register 31) is reserved.
   2730 
    /*
     * Quick entrypoint for the invoke-polymorphic bytecode (MethodHandle
     * invocation).  Calls artInvokePolymorphic(result, receiver, Thread*,
     * context) with:
     *   $a0 = pointer to an 8-byte JValue result slot on the stack (zeroed
     *         below at 16($sp)/20($sp))
     *   $a1 = receiver (left in place by the managed caller)
     *   $a2 = Thread* self
     *   $a3 = pointer to the saved managed frame context
     * The runtime helper returns (in $v0) the shorty character of the
     * callee's return type; the dispatch below uses it to move the JValue
     * into the proper native return register(s).
     *
     * NOTE: this file runs under `.set noreorder`, so every branch/jump
     * delay slot here is filled explicitly (with a useful instruction
     * where possible, otherwise a nop).
     */
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move  $a2, rSELF                          # Make $a2 an alias for the current Thread.
    addiu $a3, $sp, ARG_SLOT_SIZE             # Make $a3 a pointer to the saved frame context.
    sw    $zero, 20($sp)                      # Initialize JValue result.
    sw    $zero, 16($sp)
    la    $t9, artInvokePolymorphic
    jalr  $t9                                 # (result, receiver, Thread*, context)
    addiu $a0, $sp, 16                        # Make $a0 a pointer to the JValue result (delay slot).
// Dispatch on the return-type shorty character in $v0.  The expansions are
// consecutive, so each `beq`'s delay slot is filled by the next expansion's
// `li` (harmless: $t0 is reloaded either way) or by the trailing nop.
.macro MATCH_RETURN_TYPE c, handler
    li    $t0, \c
    beq   $v0, $t0, \handler
.endm
    MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
    MATCH_RETURN_TYPE 'L', .Lstore_int_result
    MATCH_RETURN_TYPE 'I', .Lstore_int_result
    MATCH_RETURN_TYPE 'J', .Lstore_long_result
    MATCH_RETURN_TYPE 'B', .Lstore_int_result
    MATCH_RETURN_TYPE 'C', .Lstore_char_result
    MATCH_RETURN_TYPE 'D', .Lstore_double_result
    MATCH_RETURN_TYPE 'F', .Lstore_float_result
    MATCH_RETURN_TYPE 'S', .Lstore_int_result
    MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
.purgem MATCH_RETURN_TYPE
    nop                                       # Delay slot of the last `beq` above.
    b .Lcleanup_and_return                    # No match: return without storing a result.
    nop
.Lstore_boolean_result:
    b .Lcleanup_and_return
    lbu   $v0, 16($sp)                        # Move byte from JValue result to return value register.
.Lstore_char_result:
    b .Lcleanup_and_return
    lhu   $v0, 16($sp)                        # Move char from JValue result to return value register.
.Lstore_double_result:
.Lstore_float_result:
    LDu   $f0, $f1, 16, $sp, $t0              # Move double/float from JValue result to return value register.
    b .Lcleanup_and_return
    nop
.Lstore_long_result:
    lw    $v1, 20($sp)                        # Move upper bits from JValue result to return value register.
    // Fall-through for lower bits.
.Lstore_int_result:
    lw    $v0, 16($sp)                        # Move lower bits from JValue result to return value register.
    // Fall-through to clean up and return.
.Lcleanup_and_return:
    lw    $t7, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez  $t7, 1f                             # Success if no exception is pending.
    nop
    jalr  $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic
   2786