      1 /*
      2  * Copyright (C) 2009 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 /*
     18  * This file contains codegen and support common to all supported
     19  * Mips variants.  It is included by:
     20  *
     21  *        Codegen-$(TARGET_ARCH_VARIANT).c
     22  *
     23  * which combines this common code with specific support found in the
     24  * applicable directory below this one.
     25  */
     26 
     27 /*
     28  * Mark garbage collection card. Skip if the value we're storing is null.
     29  */
     30 static void markCard(CompilationUnit *cUnit, int valReg, int tgtAddrReg)
     31 {
     32     int regCardBase = dvmCompilerAllocTemp(cUnit);
     33     int regCardNo = dvmCompilerAllocTemp(cUnit);
     34     MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBeq, valReg, r_ZERO);
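             /*
              * Card index = target address >> GC_CARD_SHIFT.  Note that the byte
              * stored at cardTable[index] below is regCardBase itself (low byte
              * only, via kUnsignedByte); presumably the card table base is biased
              * so that this byte equals the dirty-card value.
              */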
     35     loadWordDisp(cUnit, rSELF, offsetof(Thread, cardTable),
     36                  regCardBase);
     37     opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
     38     storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
     39                      kUnsignedByte);
     40     MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
     41     target->defMask = ENCODE_ALL;
     42     branchOver->generic.target = (LIR *)target;
     43     dvmCompilerFreeTemp(cUnit, regCardBase);
     44     dvmCompilerFreeTemp(cUnit, regCardNo);
     45 }
     46 
     47 static bool genConversionCall(CompilationUnit *cUnit, MIR *mir, void *funct,
     48                                      int srcSize, int tgtSize)
     49 {
     50     /*
     51      * Don't optimize the register usage since it calls out to template
     52      * functions
     53      */
     54     RegLocation rlSrc;
     55     RegLocation rlDest;
     56     int srcReg = 0;
     57     int srcRegHi = 0;
     58     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
     59 
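             /*
              * Select fixed argument registers for the out-of-line helper.  The
              * choices below match the MIPS o32 calling convention: integer and
              * soft-float arguments in a0/a1 (r_ARG0/r_ARG1), hard-float
              * arguments in the $f12/$f14 argument registers.
              */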
     60     if (srcSize == kWord) {
     61         srcReg = r_A0;
     62     } else if (srcSize == kSingle) {
     63 #ifdef __mips_hard_float
     64         srcReg = r_F12;
     65 #else
     66         srcReg = r_A0;
     67 #endif
     68     } else if (srcSize == kLong) {
     69         srcReg = r_ARG0;
     70         srcRegHi = r_ARG1;
     71     } else if (srcSize == kDouble) {
     72 #ifdef __mips_hard_float
     73         srcReg = r_FARG0;
     74         srcRegHi = r_FARG1;
     75 #else
     76         srcReg = r_ARG0;
     77         srcRegHi = r_ARG1;
     78 #endif
     79     }
     80     else {
     81         assert(0);
     82     }
     83 
     84     if (srcSize == kWord || srcSize == kSingle) {
     85         rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
     86         loadValueDirectFixed(cUnit, rlSrc, srcReg);
     87     } else {
     88         rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
     89         loadValueDirectWideFixed(cUnit, rlSrc, srcReg, srcRegHi);
     90     }
     91     LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
     92     opReg(cUnit, kOpBlx, r_T9);
     93     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
     94     dvmCompilerClobberCallRegs(cUnit);
     95     if (tgtSize == kWord || tgtSize == kSingle) {
     96         RegLocation rlResult;
     97         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
     98 #ifdef __mips_hard_float
     99         if (tgtSize == kSingle)
    100             rlResult = dvmCompilerGetReturnAlt(cUnit);
    101         else
    102             rlResult = dvmCompilerGetReturn(cUnit);
    103 #else
    104         rlResult = dvmCompilerGetReturn(cUnit);
    105 #endif
    106         storeValue(cUnit, rlDest, rlResult);
    107     } else {
    108         RegLocation rlResult;
    109         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
    110 #ifdef __mips_hard_float
    111         if (tgtSize == kDouble)
    112             rlResult = dvmCompilerGetReturnWideAlt(cUnit);
    113         else
    114             rlResult = dvmCompilerGetReturnWide(cUnit);
    115 #else
    116         rlResult = dvmCompilerGetReturnWide(cUnit);
    117 #endif
    118         storeValueWide(cUnit, rlDest, rlResult);
    119     }
    120     return false;
    121 }
    122 
    123 
    124 static bool genArithOpFloatPortable(CompilationUnit *cUnit, MIR *mir,
    125                                     RegLocation rlDest, RegLocation rlSrc1,
    126                                     RegLocation rlSrc2)
    127 {
    128     RegLocation rlResult;
    129     void* funct;
    130 
    131     switch (mir->dalvikInsn.opcode) {
    132         case OP_ADD_FLOAT_2ADDR:
    133         case OP_ADD_FLOAT:
    134             funct = (void*) __addsf3;
    135             break;
    136         case OP_SUB_FLOAT_2ADDR:
    137         case OP_SUB_FLOAT:
    138             funct = (void*) __subsf3;
    139             break;
    140         case OP_DIV_FLOAT_2ADDR:
    141         case OP_DIV_FLOAT:
    142             funct = (void*) __divsf3;
    143             break;
    144         case OP_MUL_FLOAT_2ADDR:
    145         case OP_MUL_FLOAT:
    146             funct = (void*) __mulsf3;
    147             break;
    148         case OP_REM_FLOAT_2ADDR:
    149         case OP_REM_FLOAT:
    150             funct = (void*) fmodf;
    151             break;
    152         case OP_NEG_FLOAT: {
    153             genNegFloat(cUnit, rlDest, rlSrc1);
    154             return false;
    155         }
    156         default:
    157             return true;
    158     }
    159 
    160     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
    161 #ifdef __mips_hard_float
    162     loadValueDirectFixed(cUnit, rlSrc1, r_F12);
    163     loadValueDirectFixed(cUnit, rlSrc2, r_F14);
    164 #else
    165     loadValueDirectFixed(cUnit, rlSrc1, r_A0);
    166     loadValueDirectFixed(cUnit, rlSrc2, r_A1);
    167 #endif
    168     LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
    169     opReg(cUnit, kOpBlx, r_T9);
    170     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
    171     dvmCompilerClobberCallRegs(cUnit);
    172 #ifdef __mips_hard_float
    173     rlResult = dvmCompilerGetReturnAlt(cUnit);
    174 #else
    175     rlResult = dvmCompilerGetReturn(cUnit);
    176 #endif
    177     storeValue(cUnit, rlDest, rlResult);
    178     return false;
    179 }
    180 
    181 static bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
    182                                      RegLocation rlDest, RegLocation rlSrc1,
    183                                      RegLocation rlSrc2)
    184 {
    185     RegLocation rlResult;
    186     void* funct;
    187 
    188     switch (mir->dalvikInsn.opcode) {
    189         case OP_ADD_DOUBLE_2ADDR:
    190         case OP_ADD_DOUBLE:
    191             funct = (void*) __adddf3;
    192             break;
    193         case OP_SUB_DOUBLE_2ADDR:
    194         case OP_SUB_DOUBLE:
    195             funct = (void*) __subdf3;
    196             break;
    197         case OP_DIV_DOUBLE_2ADDR:
    198         case OP_DIV_DOUBLE:
     199             funct = (void*) __divdf3;
    200             break;
    201         case OP_MUL_DOUBLE_2ADDR:
    202         case OP_MUL_DOUBLE:
    203             funct = (void*) __muldf3;
    204             break;
    205         case OP_REM_DOUBLE_2ADDR:
    206         case OP_REM_DOUBLE:
    207             funct = (void*) (double (*)(double, double)) fmod;
    208             break;
    209         case OP_NEG_DOUBLE: {
    210             genNegDouble(cUnit, rlDest, rlSrc1);
    211             return false;
    212         }
    213         default:
    214             return true;
    215     }
    216     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
    217     LOAD_FUNC_ADDR(cUnit, r_T9, (int)funct);
    218 #ifdef __mips_hard_float
    219     loadValueDirectWideFixed(cUnit, rlSrc1, r_F12, r_F13);
    220     loadValueDirectWideFixed(cUnit, rlSrc2, r_F14, r_F15);
    221 #else
    222     loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
    223     loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
    224 #endif
    225     opReg(cUnit, kOpBlx, r_T9);
    226     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
    227     dvmCompilerClobberCallRegs(cUnit);
    228 #ifdef __mips_hard_float
    229     rlResult = dvmCompilerGetReturnWideAlt(cUnit);
    230 #else
    231     rlResult = dvmCompilerGetReturnWide(cUnit);
    232 #endif
    233     storeValueWide(cUnit, rlDest, rlResult);
    234 #if defined(WITH_SELF_VERIFICATION)
    235     cUnit->usesLinkRegister = true;
    236 #endif
    237     return false;
    238 }
    239 
    240 static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir)
    241 {
    242     Opcode opcode = mir->dalvikInsn.opcode;
    243 
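             /*
              * Each case maps to the matching libgcc soft-float helper (__float*
              * converts int to FP, __fix* converts FP to int, __truncdfsf2 and
              * __extendsfdf2 change FP width); genConversionCall picks the
              * argument and return registers from the operand sizes passed here.
              */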
    244     switch (opcode) {
    245         case OP_INT_TO_FLOAT:
    246             return genConversionCall(cUnit, mir, (void*)__floatsisf, kWord, kSingle);
    247         case OP_FLOAT_TO_INT:
    248             return genConversionCall(cUnit, mir, (void*)__fixsfsi, kSingle, kWord);
    249         case OP_DOUBLE_TO_FLOAT:
    250             return genConversionCall(cUnit, mir, (void*)__truncdfsf2, kDouble, kSingle);
    251         case OP_FLOAT_TO_DOUBLE:
    252             return genConversionCall(cUnit, mir, (void*)__extendsfdf2, kSingle, kDouble);
    253         case OP_INT_TO_DOUBLE:
    254             return genConversionCall(cUnit, mir, (void*)__floatsidf, kWord, kDouble);
    255         case OP_DOUBLE_TO_INT:
    256             return genConversionCall(cUnit, mir, (void*)__fixdfsi, kDouble, kWord);
    257         case OP_FLOAT_TO_LONG:
    258             return genConversionCall(cUnit, mir, (void*)__fixsfdi, kSingle, kLong);
    259         case OP_LONG_TO_FLOAT:
    260             return genConversionCall(cUnit, mir, (void*)__floatdisf, kLong, kSingle);
    261         case OP_DOUBLE_TO_LONG:
    262             return genConversionCall(cUnit, mir, (void*)__fixdfdi, kDouble, kLong);
    263         case OP_LONG_TO_DOUBLE:
    264             return genConversionCall(cUnit, mir, (void*)__floatdidf, kLong, kDouble);
    265         default:
    266             return true;
    267     }
    268     return false;
    269 }
    270 
    271 #if defined(WITH_SELF_VERIFICATION)
     272 static void selfVerificationBranchInsert(LIR *currentLIR, MipsOpCode opcode,
    273                           int dest, int src1)
    274 {
     275     assert(0); /* MIPSTODO port selfVerificationBranchInsert() */
     276     MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
     277     insn->opcode = opcode;
     278     insn->operands[0] = dest;
     279     insn->operands[1] = src1;
     280     setupResourceMasks(insn);
     281     dvmCompilerInsertLIRBefore(currentLIR, (LIR *) insn);
    282 }
    283 
    284 /*
    285  * Example where r14 (LR) is preserved around a heap access under
    286  * self-verification mode in Thumb2:
    287  *
    288  * D/dalvikvm( 1538): 0x59414c5e (0026): ldr     r14, [r15pc, #220] <-hoisted
    289  * D/dalvikvm( 1538): 0x59414c62 (002a): mla     r4, r0, r8, r4
    290  * D/dalvikvm( 1538): 0x59414c66 (002e): adds    r3, r4, r3
    291  * D/dalvikvm( 1538): 0x59414c6a (0032): push    <r5, r14>    ---+
    292  * D/dalvikvm( 1538): 0x59414c6c (0034): blx_1   0x5940f494      |
    293  * D/dalvikvm( 1538): 0x59414c6e (0036): blx_2   see above       <-MEM_OP_DECODE
    294  * D/dalvikvm( 1538): 0x59414c70 (0038): ldr     r10, [r9, #0]   |
    295  * D/dalvikvm( 1538): 0x59414c74 (003c): pop     <r5, r14>    ---+
    296  * D/dalvikvm( 1538): 0x59414c78 (0040): mov     r11, r10
    297  * D/dalvikvm( 1538): 0x59414c7a (0042): asr     r12, r11, #31
    298  * D/dalvikvm( 1538): 0x59414c7e (0046): movs    r0, r2
    299  * D/dalvikvm( 1538): 0x59414c80 (0048): movs    r1, r3
    300  * D/dalvikvm( 1538): 0x59414c82 (004a): str     r2, [r5, #16]
    301  * D/dalvikvm( 1538): 0x59414c84 (004c): mov     r2, r11
    302  * D/dalvikvm( 1538): 0x59414c86 (004e): str     r3, [r5, #20]
    303  * D/dalvikvm( 1538): 0x59414c88 (0050): mov     r3, r12
    304  * D/dalvikvm( 1538): 0x59414c8a (0052): str     r11, [r5, #24]
    305  * D/dalvikvm( 1538): 0x59414c8e (0056): str     r12, [r5, #28]
    306  * D/dalvikvm( 1538): 0x59414c92 (005a): blx     r14             <-use of LR
    307  *
    308  */
    309 static void selfVerificationBranchInsertPass(CompilationUnit *cUnit)
    310 {
     311     assert(0); /* MIPSTODO port selfVerificationBranchInsertPass() */
     312     MipsLIR *thisLIR;
     313     TemplateOpcode opcode = TEMPLATE_MEM_OP_DECODE;
    314 
    315     for (thisLIR = (MipsLIR *) cUnit->firstLIRInsn;
    316          thisLIR != (MipsLIR *) cUnit->lastLIRInsn;
    317          thisLIR = NEXT_LIR(thisLIR)) {
    318         if (!thisLIR->flags.isNop && thisLIR->flags.insertWrapper) {
    319             /*
    320              * Push r5(FP) and r14(LR) onto stack. We need to make sure that
    321              * SP is 8-byte aligned, and we use r5 as a temp to restore LR
    322              * for Thumb-only target since LR cannot be directly accessed in
    323              * Thumb mode. Another reason to choose r5 here is it is the Dalvik
    324              * frame pointer and cannot be the target of the emulated heap
    325              * load.
    326              */
    327             if (cUnit->usesLinkRegister) {
    328                 genSelfVerificationPreBranch(cUnit, thisLIR);
    329             }
    330 
    331             /* Branch to mem op decode template */
    332             selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx1,
    333                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
    334                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
    335             selfVerificationBranchInsert((LIR *) thisLIR, kThumbBlx2,
    336                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode],
    337                        (int) gDvmJit.codeCache + templateEntryOffsets[opcode]);
    338 
    339             /* Restore LR */
    340             if (cUnit->usesLinkRegister) {
    341                 genSelfVerificationPostBranch(cUnit, thisLIR);
    342             }
    343         }
    344     }
    345 }
    346 #endif
    347 
    348 /* Generate conditional branch instructions */
    349 static MipsLIR *genConditionalBranchMips(CompilationUnit *cUnit,
    350                                     MipsOpCode opc, int rs, int rt,
    351                                     MipsLIR *target)
    352 {
    353     MipsLIR *branch = opCompareBranch(cUnit, opc, rs, rt);
    354     branch->generic.target = (LIR *) target;
    355     return branch;
    356 }
    357 
     358 /* Generate an unconditional branch to go to the interpreter */
    359 static inline MipsLIR *genTrap(CompilationUnit *cUnit, int dOffset,
    360                                   MipsLIR *pcrLabel)
    361 {
    362     MipsLIR *branch = opNone(cUnit, kOpUncondBr);
    363     return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
    364 }
    365 
    366 /* Load a wide field from an object instance */
    367 static void genIGetWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
    368 {
    369     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
    370     RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
    371     RegLocation rlResult;
    372     rlObj = loadValue(cUnit, rlObj, kCoreReg);
    373     int regPtr = dvmCompilerAllocTemp(cUnit);
    374 
    375     assert(rlDest.wide);
    376 
    377     genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
    378                  NULL);/* null object? */
    379     opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
    380     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
    381 
    382     HEAP_ACCESS_SHADOW(true);
    383     loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
    384     HEAP_ACCESS_SHADOW(false);
    385 
    386     dvmCompilerFreeTemp(cUnit, regPtr);
    387     storeValueWide(cUnit, rlDest, rlResult);
    388 }
    389 
    390 /* Store a wide field to an object instance */
    391 static void genIPutWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset)
    392 {
    393     RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
    394     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 2);
    395     rlObj = loadValue(cUnit, rlObj, kCoreReg);
    396     int regPtr;
    397     rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
    398     genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
    399                  NULL);/* null object? */
    400     regPtr = dvmCompilerAllocTemp(cUnit);
    401     opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
    402 
    403     HEAP_ACCESS_SHADOW(true);
    404     storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
    405     HEAP_ACCESS_SHADOW(false);
    406 
    407     dvmCompilerFreeTemp(cUnit, regPtr);
    408 }
    409 
    410 /*
    411  * Load a field from an object instance
    412  *
    413  */
    414 static void genIGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
    415                     int fieldOffset, bool isVolatile)
    416 {
    417     RegLocation rlResult;
    418     RegisterClass regClass = dvmCompilerRegClassBySize(size);
    419     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
    420     RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
    421     rlObj = loadValue(cUnit, rlObj, kCoreReg);
    422     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, regClass, true);
    423     genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
    424                  NULL);/* null object? */
    425 
    426     HEAP_ACCESS_SHADOW(true);
    427     loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
    428                  size, rlObj.sRegLow);
    429     HEAP_ACCESS_SHADOW(false);
    430     if (isVolatile) {
    431         dvmCompilerGenMemBarrier(cUnit, 0);
    432     }
    433 
    434     storeValue(cUnit, rlDest, rlResult);
    435 }
    436 
    437 /*
    438  * Store a field to an object instance
    439  *
    440  */
    441 static void genIPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
    442                     int fieldOffset, bool isObject, bool isVolatile)
    443 {
    444     RegisterClass regClass = dvmCompilerRegClassBySize(size);
    445     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
    446     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 1);
    447     rlObj = loadValue(cUnit, rlObj, kCoreReg);
    448     rlSrc = loadValue(cUnit, rlSrc, regClass);
    449     genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset,
    450                  NULL);/* null object? */
    451 
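             /*
              * Volatile stores are bracketed by barriers: presumably the first
              * orders earlier writes before the field store, and the second keeps
              * the store from being reordered past later accesses (the usual
              * volatile-store treatment).
              */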
    452     if (isVolatile) {
    453         dvmCompilerGenMemBarrier(cUnit, 0);
    454     }
    455     HEAP_ACCESS_SHADOW(true);
    456     storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, size);
    457     HEAP_ACCESS_SHADOW(false);
    458     if (isVolatile) {
    459         dvmCompilerGenMemBarrier(cUnit, 0);
    460     }
    461     if (isObject) {
    462         /* NOTE: marking card based on object head */
    463         markCard(cUnit, rlSrc.lowReg, rlObj.lowReg);
    464     }
    465 }
    466 
    467 
    468 /*
    469  * Generate array load
    470  */
    471 static void genArrayGet(CompilationUnit *cUnit, MIR *mir, OpSize size,
    472                         RegLocation rlArray, RegLocation rlIndex,
    473                         RegLocation rlDest, int scale)
    474 {
    475     RegisterClass regClass = dvmCompilerRegClassBySize(size);
    476     int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
    477     int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
    478     RegLocation rlResult;
    479     rlArray = loadValue(cUnit, rlArray, kCoreReg);
    480     rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
    481     int regPtr;
    482 
    483     /* null object? */
    484     MipsLIR * pcrLabel = NULL;
    485 
    486     if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
    487         pcrLabel = genNullCheck(cUnit, rlArray.sRegLow,
    488                                 rlArray.lowReg, mir->offset, NULL);
    489     }
    490 
    491     regPtr = dvmCompilerAllocTemp(cUnit);
    492 
    493     assert(IS_SIMM16(dataOffset));
    494     if (scale) {
    495         opRegRegImm(cUnit, kOpLsl, regPtr, rlIndex.lowReg, scale);
    496     }
    497 
    498     if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
    499         int regLen = dvmCompilerAllocTemp(cUnit);
    500         /* Get len */
    501         loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
    502         genBoundsCheck(cUnit, rlIndex.lowReg, regLen, mir->offset,
    503                        pcrLabel);
    504         dvmCompilerFreeTemp(cUnit, regLen);
    505     }
    506 
    507     if (scale) {
    508         opRegReg(cUnit, kOpAdd, regPtr, rlArray.lowReg);
    509     } else {
    510         opRegRegReg(cUnit, kOpAdd, regPtr, rlArray.lowReg, rlIndex.lowReg);
    511     }
    512 
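             /*
              * regPtr now holds array + (index << scale); the element itself is
              * accessed at regPtr + dataOffset, i.e. just past the ArrayObject
              * header.
              */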
    513     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, regClass, true);
    514     if ((size == kLong) || (size == kDouble)) {
    515         HEAP_ACCESS_SHADOW(true);
    516         loadBaseDispWide(cUnit, mir, regPtr, dataOffset, rlResult.lowReg,
    517                          rlResult.highReg, INVALID_SREG);
    518         HEAP_ACCESS_SHADOW(false);
    519         dvmCompilerFreeTemp(cUnit, regPtr);
    520         storeValueWide(cUnit, rlDest, rlResult);
    521     } else {
    522         HEAP_ACCESS_SHADOW(true);
    523         loadBaseDisp(cUnit, mir, regPtr, dataOffset, rlResult.lowReg,
    524                      size, INVALID_SREG);
    525         HEAP_ACCESS_SHADOW(false);
    526         dvmCompilerFreeTemp(cUnit, regPtr);
    527         storeValue(cUnit, rlDest, rlResult);
    528     }
    529 }
    530 
    531 /*
    532  * Generate array store
    533  *
    534  */
    535 static void genArrayPut(CompilationUnit *cUnit, MIR *mir, OpSize size,
    536                         RegLocation rlArray, RegLocation rlIndex,
    537                         RegLocation rlSrc, int scale)
    538 {
    539     RegisterClass regClass = dvmCompilerRegClassBySize(size);
    540     int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
    541     int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
    542 
    543     int regPtr;
    544     rlArray = loadValue(cUnit, rlArray, kCoreReg);
    545     rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
    546 
    547     if (dvmCompilerIsTemp(cUnit, rlArray.lowReg)) {
    548         dvmCompilerClobber(cUnit, rlArray.lowReg);
    549         regPtr = rlArray.lowReg;
    550     } else {
    551         regPtr = dvmCompilerAllocTemp(cUnit);
    552         genRegCopy(cUnit, regPtr, rlArray.lowReg);
    553     }
    554 
    555     /* null object? */
    556     MipsLIR * pcrLabel = NULL;
    557 
    558     if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
    559         pcrLabel = genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg,
    560                                 mir->offset, NULL);
    561     }
    562 
    563     assert(IS_SIMM16(dataOffset));
    564     int tReg = dvmCompilerAllocTemp(cUnit);
    565     if (scale) {
    566         opRegRegImm(cUnit, kOpLsl, tReg, rlIndex.lowReg, scale);
    567     }
    568 
    569     if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
    570         int regLen = dvmCompilerAllocTemp(cUnit);
     571         // NOTE: max live temps (4) here.
    572         /* Get len */
    573         loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
    574         genBoundsCheck(cUnit, rlIndex.lowReg, regLen, mir->offset,
    575                        pcrLabel);
    576         dvmCompilerFreeTemp(cUnit, regLen);
    577     }
    578 
    579     if (scale) {
    580         opRegReg(cUnit, kOpAdd, tReg, rlArray.lowReg);
    581     } else {
    582         opRegRegReg(cUnit, kOpAdd, tReg, rlArray.lowReg, rlIndex.lowReg);
    583     }
    584 
    585     /* at this point, tReg points to array, 2 live temps */
    586     if ((size == kLong) || (size == kDouble)) {
    587         rlSrc = loadValueWide(cUnit, rlSrc, regClass);
    588         HEAP_ACCESS_SHADOW(true);
     589         storeBaseDispWide(cUnit, tReg, dataOffset, rlSrc.lowReg, rlSrc.highReg);
    590         HEAP_ACCESS_SHADOW(false);
    591         dvmCompilerFreeTemp(cUnit, tReg);
    592         dvmCompilerFreeTemp(cUnit, regPtr);
    593     } else {
    594         rlSrc = loadValue(cUnit, rlSrc, regClass);
    595         HEAP_ACCESS_SHADOW(true);
    596         storeBaseDisp(cUnit, tReg, dataOffset, rlSrc.lowReg, size);
    597         dvmCompilerFreeTemp(cUnit, tReg);
    598         HEAP_ACCESS_SHADOW(false);
    599     }
    600 }
    601 
    602 /*
    603  * Generate array object store
    604  * Must use explicit register allocation here because of
    605  * call-out to dvmCanPutArrayElement
    606  */
    607 static void genArrayObjectPut(CompilationUnit *cUnit, MIR *mir,
    608                               RegLocation rlArray, RegLocation rlIndex,
    609                               RegLocation rlSrc, int scale)
    610 {
    611     int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
    612     int dataOffset = OFFSETOF_MEMBER(ArrayObject, contents);
    613 
    614     int regLen = r_A0;
    615     int regPtr = r_S0;  /* Preserved across call */
    616     int regArray = r_A1;
    617     int regIndex = r_S4;  /* Preserved across call */
    618 
    619     dvmCompilerFlushAllRegs(cUnit);
    620     // moved lock for r_S0 and r_S4 here from below since genBoundsCheck
    621     // allocates a temporary that can result in clobbering either of them
    622     dvmCompilerLockTemp(cUnit, regPtr);   // r_S0
    623     dvmCompilerLockTemp(cUnit, regIndex); // r_S4
    624 
    625     loadValueDirectFixed(cUnit, rlArray, regArray);
    626     loadValueDirectFixed(cUnit, rlIndex, regIndex);
    627 
    628     /* null object? */
    629     MipsLIR * pcrLabel = NULL;
    630 
    631     if (!(mir->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
    632         pcrLabel = genNullCheck(cUnit, rlArray.sRegLow, regArray,
    633                                 mir->offset, NULL);
    634     }
    635 
    636     if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
    637         /* Get len */
    638         loadWordDisp(cUnit, regArray, lenOffset, regLen);
    639         /* regPtr -> array data */
    640         opRegRegImm(cUnit, kOpAdd, regPtr, regArray, dataOffset);
    641         genBoundsCheck(cUnit, regIndex, regLen, mir->offset,
    642                        pcrLabel);
    643     } else {
    644         /* regPtr -> array data */
    645         opRegRegImm(cUnit, kOpAdd, regPtr, regArray, dataOffset);
    646     }
    647 
    648     /* Get object to store */
    649     loadValueDirectFixed(cUnit, rlSrc, r_A0);
    650     LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmCanPutArrayElement);
    651 
    652     /* Are we storing null?  If so, avoid check */
    653     MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBeqz, r_A0, -1);
    654 
    655     /* Make sure the types are compatible */
    656     loadWordDisp(cUnit, regArray, offsetof(Object, clazz), r_A1);
    657     loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A0);
    658     opReg(cUnit, kOpBlx, r_T9);
    659     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
    660     dvmCompilerClobberCallRegs(cUnit);
    661 
    662     /*
    663      * Using fixed registers here, and counting on r_S0 and r_S4 being
    664      * preserved across the above call.  Tell the register allocation
    665      * utilities about the regs we are using directly
    666      */
    667     dvmCompilerLockTemp(cUnit, r_A0);
    668     dvmCompilerLockTemp(cUnit, r_A1);
    669 
    670     /* Bad? - roll back and re-execute if so */
    671     genRegImmCheck(cUnit, kMipsCondEq, r_V0, 0, mir->offset, pcrLabel);
    672 
    673     /* Resume here - must reload element & array, regPtr & index preserved */
    674     loadValueDirectFixed(cUnit, rlSrc, r_A0);
    675     loadValueDirectFixed(cUnit, rlArray, r_A1);
    676 
    677     MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
    678     target->defMask = ENCODE_ALL;
    679     branchOver->generic.target = (LIR *) target;
    680 
    681     HEAP_ACCESS_SHADOW(true);
    682     storeBaseIndexed(cUnit, regPtr, regIndex, r_A0,
    683                      scale, kWord);
    684     HEAP_ACCESS_SHADOW(false);
    685 
    686     dvmCompilerFreeTemp(cUnit, regPtr);
    687     dvmCompilerFreeTemp(cUnit, regIndex);
    688 
    689     /* NOTE: marking card here based on object head */
    690     markCard(cUnit, r_A0, r_A1);
    691 }
    692 
    693 static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir,
    694                            RegLocation rlDest, RegLocation rlSrc1,
    695                            RegLocation rlShift)
    696 {
    697     /*
     698      * Don't mess with the registers here as there is a particular calling
    699      * convention to the out-of-line handler.
    700      */
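             /*
              * The 64-bit operand is passed in a0/a1 (r_ARG0/r_ARG1) and the shift
              * count in a2; the template hands the result back in the registers
              * returned by dvmCompilerGetReturnWide() below.
              */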
    701     RegLocation rlResult;
    702 
    703     loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
    704     loadValueDirect(cUnit, rlShift, r_A2);
    705     switch( mir->dalvikInsn.opcode) {
    706         case OP_SHL_LONG:
    707         case OP_SHL_LONG_2ADDR:
    708             genDispatchToHandler(cUnit, TEMPLATE_SHL_LONG);
    709             break;
    710         case OP_SHR_LONG:
    711         case OP_SHR_LONG_2ADDR:
    712             genDispatchToHandler(cUnit, TEMPLATE_SHR_LONG);
    713             break;
    714         case OP_USHR_LONG:
    715         case OP_USHR_LONG_2ADDR:
    716             genDispatchToHandler(cUnit, TEMPLATE_USHR_LONG);
    717             break;
    718         default:
    719             return true;
    720     }
    721     rlResult = dvmCompilerGetReturnWide(cUnit);
    722     storeValueWide(cUnit, rlDest, rlResult);
    723     return false;
    724 }
    725 
    726 static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
    727                            RegLocation rlDest, RegLocation rlSrc1,
    728                            RegLocation rlSrc2)
    729 {
    730     RegLocation rlResult;
    731     OpKind firstOp = kOpBkpt;
    732     OpKind secondOp = kOpBkpt;
    733     bool callOut = false;
    734     bool checkZero = false;
    735     void *callTgt;
    736 
    737     switch (mir->dalvikInsn.opcode) {
    738         case OP_NOT_LONG:
    739             rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
    740             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
    741             opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
    742             opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
    743             storeValueWide(cUnit, rlDest, rlResult);
    744             return false;
    745             break;
    746         case OP_ADD_LONG:
    747         case OP_ADD_LONG_2ADDR:
    748             firstOp = kOpAdd;
    749             secondOp = kOpAdc;
    750             break;
    751         case OP_SUB_LONG:
    752         case OP_SUB_LONG_2ADDR:
    753             firstOp = kOpSub;
    754             secondOp = kOpSbc;
    755             break;
    756         case OP_MUL_LONG:
    757         case OP_MUL_LONG_2ADDR:
    758             genMulLong(cUnit, rlDest, rlSrc1, rlSrc2);
    759             return false;
    760         case OP_DIV_LONG:
    761         case OP_DIV_LONG_2ADDR:
    762             callOut = true;
    763             checkZero = true;
    764             callTgt = (void*)__divdi3;
    765             break;
    766         case OP_REM_LONG:
    767         case OP_REM_LONG_2ADDR:
    768             callOut = true;
    769             callTgt = (void*)__moddi3;
    770             checkZero = true;
    771             break;
    772         case OP_AND_LONG_2ADDR:
    773         case OP_AND_LONG:
    774             firstOp = kOpAnd;
    775             secondOp = kOpAnd;
    776             break;
    777         case OP_OR_LONG:
    778         case OP_OR_LONG_2ADDR:
    779             firstOp = kOpOr;
    780             secondOp = kOpOr;
    781             break;
    782         case OP_XOR_LONG:
    783         case OP_XOR_LONG_2ADDR:
    784             firstOp = kOpXor;
    785             secondOp = kOpXor;
    786             break;
    787         case OP_NEG_LONG: {
    788             int tReg = dvmCompilerAllocTemp(cUnit);
    789             rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
    790             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
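                     /*
                      * 64-bit negate without a carry flag:
                      *   result.lo = 0 - src.lo
                      *   tReg      = 0 - src.hi
                      *   borrow    = (result.lo != 0)   -- the sltu below
                      *   result.hi = tReg - borrow
                      */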
    791             newLIR3(cUnit, kMipsSubu, rlResult.lowReg, r_ZERO, rlSrc2.lowReg);
    792             newLIR3(cUnit, kMipsSubu, tReg, r_ZERO, rlSrc2.highReg);
    793             newLIR3(cUnit, kMipsSltu, rlResult.highReg, r_ZERO, rlResult.lowReg);
    794             newLIR3(cUnit, kMipsSubu, rlResult.highReg, tReg, rlResult.highReg);
    795             dvmCompilerFreeTemp(cUnit, tReg);
    796             storeValueWide(cUnit, rlDest, rlResult);
    797             return false;
    798             break;
    799         }
    800         default:
    801             ALOGE("Invalid long arith op");
    802             dvmCompilerAbort(cUnit);
    803     }
    804     if (!callOut) {
    805         genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
    806     } else {
    807         dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
    808         loadValueDirectWideFixed(cUnit, rlSrc2, r_ARG2, r_ARG3);
    809         loadValueDirectWideFixed(cUnit, rlSrc1, r_ARG0, r_ARG1);
    810         LOAD_FUNC_ADDR(cUnit, r_T9, (int) callTgt);
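                 /*
                  * For div/rem, test the 64-bit divisor (a2/a3) before the call:
                  * OR-ing its two halves into a temp yields zero iff the whole
                  * divisor is zero.
                  */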
    811         if (checkZero) {
    812             int tReg = r_T1; // Using fixed registers during call sequence
    813             opRegRegReg(cUnit, kOpOr, tReg, r_ARG2, r_ARG3);
    814             genRegImmCheck(cUnit, kMipsCondEq, tReg, 0, mir->offset, NULL);
    815         }
    816         opReg(cUnit, kOpBlx, r_T9);
    817         newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
    818         dvmCompilerClobberCallRegs(cUnit);
    819         rlResult = dvmCompilerGetReturnWide(cUnit);
    820         storeValueWide(cUnit, rlDest, rlResult);
    821 #if defined(WITH_SELF_VERIFICATION)
    822         cUnit->usesLinkRegister = true;
    823 #endif
    824     }
    825     return false;
    826 }
    827 
    828 static bool genArithOpInt(CompilationUnit *cUnit, MIR *mir,
    829                           RegLocation rlDest, RegLocation rlSrc1,
    830                           RegLocation rlSrc2)
    831 {
    832     OpKind op = kOpBkpt;
    833     bool checkZero = false;
    834     bool unary = false;
    835     RegLocation rlResult;
    836     bool shiftOp = false;
     837     bool isDivRem = false;
    838     MipsOpCode opc;
    839     int divReg;
    840 
    841     switch (mir->dalvikInsn.opcode) {
    842         case OP_NEG_INT:
    843             op = kOpNeg;
    844             unary = true;
    845             break;
    846         case OP_NOT_INT:
    847             op = kOpMvn;
    848             unary = true;
    849             break;
    850         case OP_ADD_INT:
    851         case OP_ADD_INT_2ADDR:
    852             op = kOpAdd;
    853             break;
    854         case OP_SUB_INT:
    855         case OP_SUB_INT_2ADDR:
    856             op = kOpSub;
    857             break;
    858         case OP_MUL_INT:
    859         case OP_MUL_INT_2ADDR:
    860             op = kOpMul;
    861             break;
    862         case OP_DIV_INT:
    863         case OP_DIV_INT_2ADDR:
    864             isDivRem = true;
    865             checkZero = true;
    866             opc = kMipsMflo;
    867             divReg = r_LO;
    868             break;
    869         case OP_REM_INT:
    870         case OP_REM_INT_2ADDR:
    871             isDivRem = true;
    872             checkZero = true;
    873             opc = kMipsMfhi;
    874             divReg = r_HI;
    875             break;
    876         case OP_AND_INT:
    877         case OP_AND_INT_2ADDR:
    878             op = kOpAnd;
    879             break;
    880         case OP_OR_INT:
    881         case OP_OR_INT_2ADDR:
    882             op = kOpOr;
    883             break;
    884         case OP_XOR_INT:
    885         case OP_XOR_INT_2ADDR:
    886             op = kOpXor;
    887             break;
    888         case OP_SHL_INT:
    889         case OP_SHL_INT_2ADDR:
    890             shiftOp = true;
    891             op = kOpLsl;
    892             break;
    893         case OP_SHR_INT:
    894         case OP_SHR_INT_2ADDR:
    895             shiftOp = true;
    896             op = kOpAsr;
    897             break;
    898         case OP_USHR_INT:
    899         case OP_USHR_INT_2ADDR:
    900             shiftOp = true;
    901             op = kOpLsr;
    902             break;
    903         default:
    904             ALOGE("Invalid word arith op: %#x(%d)",
    905                  mir->dalvikInsn.opcode, mir->dalvikInsn.opcode);
    906             dvmCompilerAbort(cUnit);
    907     }
    908 
    909     rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
    910     if (unary) {
    911         rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
    912         opRegReg(cUnit, op, rlResult.lowReg,
    913                  rlSrc1.lowReg);
    914     } else if (isDivRem) {
    915         rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    916         if (checkZero) {
    917             genNullCheck(cUnit, rlSrc2.sRegLow, rlSrc2.lowReg, mir->offset, NULL);
    918         }
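                 /*
                  * MIPS DIV leaves the quotient in LO and the remainder in HI;
                  * opc/divReg were chosen above (mflo/r_LO for OP_DIV_INT,
                  * mfhi/r_HI for OP_REM_INT).
                  */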
    919         newLIR4(cUnit, kMipsDiv, r_HI, r_LO, rlSrc1.lowReg, rlSrc2.lowReg);
    920         rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
    921         newLIR2(cUnit, opc, rlResult.lowReg, divReg);
    922     } else {
    923         rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    924         if (shiftOp) {
    925             int tReg = dvmCompilerAllocTemp(cUnit);
    926             opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
    927             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
    928             opRegRegReg(cUnit, op, rlResult.lowReg,
    929                         rlSrc1.lowReg, tReg);
    930             dvmCompilerFreeTemp(cUnit, tReg);
    931         } else {
    932             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
    933             opRegRegReg(cUnit, op, rlResult.lowReg,
    934                         rlSrc1.lowReg, rlSrc2.lowReg);
    935         }
    936     }
    937     storeValue(cUnit, rlDest, rlResult);
    938 
    939     return false;
    940 }
    941 
    942 static bool genArithOp(CompilationUnit *cUnit, MIR *mir)
    943 {
    944     Opcode opcode = mir->dalvikInsn.opcode;
    945     RegLocation rlDest;
    946     RegLocation rlSrc1;
    947     RegLocation rlSrc2;
    948     /* Deduce sizes of operands */
    949     if (mir->ssaRep->numUses == 2) {
    950         rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
    951         rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
    952     } else if (mir->ssaRep->numUses == 3) {
    953         rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
    954         rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 2);
    955     } else {
    956         rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
    957         rlSrc2 = dvmCompilerGetSrcWide(cUnit, mir, 2, 3);
    958         assert(mir->ssaRep->numUses == 4);
    959     }
    960     if (mir->ssaRep->numDefs == 1) {
    961         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
    962     } else {
    963         assert(mir->ssaRep->numDefs == 2);
    964         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
    965     }
    966 
    967     if ((opcode >= OP_ADD_LONG_2ADDR) && (opcode <= OP_XOR_LONG_2ADDR)) {
    968         return genArithOpLong(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    969     }
    970     if ((opcode >= OP_ADD_LONG) && (opcode <= OP_XOR_LONG)) {
    971         return genArithOpLong(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    972     }
    973     if ((opcode >= OP_SHL_LONG_2ADDR) && (opcode <= OP_USHR_LONG_2ADDR)) {
    974         return genShiftOpLong(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    975     }
    976     if ((opcode >= OP_SHL_LONG) && (opcode <= OP_USHR_LONG)) {
    977         return genShiftOpLong(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    978     }
    979     if ((opcode >= OP_ADD_INT_2ADDR) && (opcode <= OP_USHR_INT_2ADDR)) {
    980         return genArithOpInt(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    981     }
    982     if ((opcode >= OP_ADD_INT) && (opcode <= OP_USHR_INT)) {
    983         return genArithOpInt(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    984     }
    985     if ((opcode >= OP_ADD_FLOAT_2ADDR) && (opcode <= OP_REM_FLOAT_2ADDR)) {
    986         return genArithOpFloat(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    987     }
    988     if ((opcode >= OP_ADD_FLOAT) && (opcode <= OP_REM_FLOAT)) {
    989         return genArithOpFloat(cUnit, mir, rlDest, rlSrc1, rlSrc2);
    990     }
    991     if ((opcode >= OP_ADD_DOUBLE_2ADDR) && (opcode <= OP_REM_DOUBLE_2ADDR)) {
    992         return genArithOpDouble(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    993     }
    994     if ((opcode >= OP_ADD_DOUBLE) && (opcode <= OP_REM_DOUBLE)) {
    995         return genArithOpDouble(cUnit,mir, rlDest, rlSrc1, rlSrc2);
    996     }
    997     return true;
    998 }
    999 
   1000 /* Generate unconditional branch instructions */
   1001 static MipsLIR *genUnconditionalBranch(CompilationUnit *cUnit, MipsLIR *target)
   1002 {
   1003     MipsLIR *branch = opNone(cUnit, kOpUncondBr);
   1004     branch->generic.target = (LIR *) target;
   1005     return branch;
   1006 }
   1007 
   1008 /* Perform the actual operation for OP_RETURN_* */
   1009 void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
   1010 {
   1011     genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   1012                          TEMPLATE_RETURN_PROF : TEMPLATE_RETURN);
   1013 #if defined(WITH_JIT_TUNING)
   1014     gDvmJit.returnOp++;
   1015 #endif
   1016     int dPC = (int) (cUnit->method->insns + mir->offset);
   1017     /* Insert branch, but defer setting of target */
   1018     MipsLIR *branch = genUnconditionalBranch(cUnit, NULL);
   1019     /* Set up the place holder to reconstruct this Dalvik PC */
   1020     MipsLIR *pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   1021     pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
   1022     pcrLabel->operands[0] = dPC;
   1023     pcrLabel->operands[1] = mir->offset;
   1024     /* Insert the place holder to the growable list */
   1025     dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
   1026     /* Branch to the PC reconstruction code */
   1027     branch->generic.target = (LIR *) pcrLabel;
   1028 }
   1029 
   1030 static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
   1031                                   DecodedInstruction *dInsn,
   1032                                   MipsLIR **pcrLabel)
   1033 {
   1034     unsigned int i;
   1035     unsigned int regMask = 0;
   1036     RegLocation rlArg;
   1037     int numDone = 0;
   1038 
   1039     /*
   1040      * Load arguments to r_A0..r_T0.  Note that these registers may contain
   1041      * live values, so we clobber them immediately after loading to prevent
   1042      * them from being used as sources for subsequent loads.
   1043      */
   1044     dvmCompilerLockAllTemps(cUnit);
   1045     for (i = 0; i < dInsn->vA; i++) {
   1046         regMask |= 1 << i;
   1047         rlArg = dvmCompilerGetSrc(cUnit, mir, numDone++);
   1048         loadValueDirectFixed(cUnit, rlArg, i+r_A0); /* r_A0 thru r_T0 */
   1049     }
   1050     if (regMask) {
   1051         /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
   1052         opRegRegImm(cUnit, kOpSub, r_S4, rFP,
   1053                     sizeof(StackSaveArea) + (dInsn->vA << 2));
   1054         /* generate null check */
   1055         if (pcrLabel) {
   1056             *pcrLabel = genNullCheck(cUnit, dvmCompilerSSASrc(mir, 0), r_A0,
   1057                                      mir->offset, NULL);
   1058         }
   1059         storeMultiple(cUnit, r_S4, regMask);
   1060     }
   1061 }
   1062 
   1063 static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
   1064                                 DecodedInstruction *dInsn,
   1065                                 MipsLIR **pcrLabel)
   1066 {
   1067     int srcOffset = dInsn->vC << 2;
   1068     int numArgs = dInsn->vA;
   1069     int regMask;
   1070 
   1071     /*
   1072      * Note: here, all promoted registers will have been flushed
    1073      * back to the Dalvik base locations, so register usage restrictions
    1074      * are lifted.  All parameters are loaded from the original Dalvik
    1075      * register region, even though some might conceivably have valid copies
   1076      * cached in a preserved register.
   1077      */
   1078     dvmCompilerLockAllTemps(cUnit);
   1079 
   1080     /*
   1081      * r4PC     : &rFP[vC]
   1082      * r_S4: &newFP[0]
   1083      */
   1084     opRegRegImm(cUnit, kOpAdd, r4PC, rFP, srcOffset);
    1085     /* load up to the first 4 arguments into r_A0 .. r_A3 */
   1086     regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1;
   1087     /*
   1088      * Protect the loadMultiple instruction from being reordered with other
   1089      * Dalvik stack accesses.
   1090      */
   1091     if (numArgs != 0) loadMultiple(cUnit, r4PC, regMask);
   1092 
   1093     opRegRegImm(cUnit, kOpSub, r_S4, rFP,
   1094                 sizeof(StackSaveArea) + (numArgs << 2));
   1095     /* generate null check */
   1096     if (pcrLabel) {
   1097         *pcrLabel = genNullCheck(cUnit, dvmCompilerSSASrc(mir, 0), r_A0,
   1098                                  mir->offset, NULL);
   1099     }
   1100 
   1101     /*
   1102      * Handle remaining 4n arguments:
   1103      * store previously loaded 4 values and load the next 4 values
   1104      */
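             /*
              * Rough walkthrough for numArgs == 13, assuming loadMultiple and
              * storeMultiple advance r4PC/r_S4 as they copy: a0-a3 already hold
              * args 0-3; rFP counts down from 8, so the loop runs twice, storing
              * args 0-7 and loading args 4-11; the post-loop storeMultiple writes
              * args 8-11; the epilogue below then moves the remaining 13 & 3 = 1
              * arg through a1 (r_A0 is deliberately left out of that mask).
              */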
   1105     if (numArgs >= 8) {
   1106         MipsLIR *loopLabel = NULL;
   1107         /*
    1108          * r_A0 contains "this" and is needed later, so save it in r_T0 first.
    1109          * r_S1 (rFP) is saved in r_T1 because rFP doubles as the loop counter below.
   1110          */
   1111 
   1112         newLIR2(cUnit, kMipsMove, r_T0, r_A0);
   1113         newLIR2(cUnit, kMipsMove, r_T1, r_S1);
   1114 
   1115         /* No need to generate the loop structure if numArgs <= 11 */
   1116         if (numArgs > 11) {
   1117             loadConstant(cUnit, rFP, ((numArgs - 4) >> 2) << 2);
   1118             loopLabel = newLIR0(cUnit, kMipsPseudoTargetLabel);
   1119             loopLabel->defMask = ENCODE_ALL;
   1120         }
   1121         storeMultiple(cUnit, r_S4, regMask);
   1122         /*
   1123          * Protect the loadMultiple instruction from being reordered with other
   1124          * Dalvik stack accesses.
   1125          */
   1126         loadMultiple(cUnit, r4PC, regMask);
   1127         /* No need to generate the loop structure if numArgs <= 11 */
   1128         if (numArgs > 11) {
   1129             opRegImm(cUnit, kOpSub, rFP, 4);
   1130             genConditionalBranchMips(cUnit, kMipsBne, rFP, r_ZERO, loopLabel);
   1131         }
   1132     }
   1133 
   1134     /* Save the last batch of loaded values */
   1135     if (numArgs != 0) storeMultiple(cUnit, r_S4, regMask);
   1136 
   1137     /* Generate the loop epilogue - don't use r_A0 */
   1138     if ((numArgs > 4) && (numArgs % 4)) {
   1139         regMask = ((1 << (numArgs & 0x3)) - 1) << 1;
   1140         /*
   1141          * Protect the loadMultiple instruction from being reordered with other
   1142          * Dalvik stack accesses.
   1143          */
   1144         loadMultiple(cUnit, r4PC, regMask);
   1145     }
   1146     if (numArgs >= 8) {
   1147         newLIR2(cUnit, kMipsMove, r_A0, r_T0);
   1148         newLIR2(cUnit, kMipsMove, r_S1, r_T1);
   1149     }
   1150 
   1151     /* Save the modulo 4 arguments */
   1152     if ((numArgs > 4) && (numArgs % 4)) {
   1153         storeMultiple(cUnit, r_S4, regMask);
   1154     }
   1155 }
   1156 
   1157 /*
    1158  * Generate code to set up the call stack, then jump to the chaining cell if it
   1159  * is not a native method.
   1160  */
   1161 static void genInvokeSingletonCommon(CompilationUnit *cUnit, MIR *mir,
   1162                                      BasicBlock *bb, MipsLIR *labelList,
   1163                                      MipsLIR *pcrLabel,
   1164                                      const Method *calleeMethod)
   1165 {
   1166     /*
   1167      * Note: all Dalvik register state should be flushed to
    1168      * memory by this point, so register usage restrictions no
   1169      * longer apply.  All temp & preserved registers may be used.
   1170      */
   1171     dvmCompilerLockAllTemps(cUnit);
   1172     MipsLIR *retChainingCell = &labelList[bb->fallThrough->id];
   1173 
   1174     /* r_A1 = &retChainingCell */
   1175     dvmCompilerLockTemp(cUnit, r_A1);
   1176     MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
   1177     addrRetChain->generic.target = (LIR *) retChainingCell;
   1178     addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
   1179     addrRetChain->generic.target = (LIR *) retChainingCell;
   1180 
   1181     /* r4PC = dalvikCallsite */
   1182     loadConstant(cUnit, r4PC,
   1183                  (int) (cUnit->method->insns + mir->offset));
   1184     /*
   1185      * r_A0 = calleeMethod (loaded upon calling genInvokeSingletonCommon)
   1186      * r_A1 = &ChainingCell
   1187      * r4PC = callsiteDPC
   1188      */
   1189     if (dvmIsNativeMethod(calleeMethod)) {
   1190         genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   1191             TEMPLATE_INVOKE_METHOD_NATIVE_PROF :
   1192             TEMPLATE_INVOKE_METHOD_NATIVE);
   1193 #if defined(WITH_JIT_TUNING)
   1194         gDvmJit.invokeNative++;
   1195 #endif
   1196     } else {
   1197         genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   1198             TEMPLATE_INVOKE_METHOD_CHAIN_PROF :
   1199             TEMPLATE_INVOKE_METHOD_CHAIN);
   1200 #if defined(WITH_JIT_TUNING)
   1201         gDvmJit.invokeMonomorphic++;
   1202 #endif
   1203         /* Branch to the chaining cell */
   1204         genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
   1205     }
   1206     /* Handle exceptions using the interpreter */
   1207     genTrap(cUnit, mir->offset, pcrLabel);
   1208 }
   1209 
   1210 /*
   1211  * Generate code to check the validity of a predicted chain and take actions
   1212  * based on the result.
   1213  *
   1214  * 0x2f1304c4 :  lui      s0,0x2d22(11554)            # s0 <- dalvikPC
   1215  * 0x2f1304c8 :  ori      s0,s0,0x2d22848c(757236876)
   1216  * 0x2f1304cc :  lahi/lui a1,0x2f13(12051)            # a1 <- &retChainingCell
   1217  * 0x2f1304d0 :  lalo/ori a1,a1,0x2f13055c(789775708)
   1218  * 0x2f1304d4 :  lahi/lui a2,0x2f13(12051)            # a2 <- &predictedChainingCell
   1219  * 0x2f1304d8 :  lalo/ori a2,a2,0x2f13056c(789775724)
   1220  * 0x2f1304dc :  jal      0x2f12d1ec(789762540)       # call TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
   1221  * 0x2f1304e0 :  nop
   1222  * 0x2f1304e4 :  b        0x2f13056c (L0x11ec10)      # off to the predicted chain
   1223  * 0x2f1304e8 :  nop
   1224  * 0x2f1304ec :  b        0x2f13054c (L0x11fc80)      # punt to the interpreter
   1225  * 0x2f1304f0 :  lui      a0,0x2d22(11554)
   1226  * 0x2f1304f4 :  lw       a0,156(s4)                  # a0 <- this->class->vtable[methodIdx]
   1227  * 0x2f1304f8 :  bgtz     a1,0x2f13051c (L0x11fa40)   # if >0 don't rechain
   1228  * 0x2f1304fc :  nop
   1229  * 0x2f130500 :  lui      t9,0x2aba(10938)
   1230  * 0x2f130504 :  ori      t9,t9,0x2abae3f8(716891128)
   1231  * 0x2f130508 :  move     a1,s2
   1232  * 0x2f13050c :  jalr     ra,t9                       # call dvmJitToPatchPredictedChain
   1233  * 0x2f130510 :  nop
   1234  * 0x2f130514 :  lw       gp,84(sp)
   1235  * 0x2f130518 :  move     a0,v0
   1236  * 0x2f13051c :  lahi/lui a1,0x2f13(12051)            # a1 <- &retChainingCell
   1237  * 0x2f130520 :  lalo/ori a1,a1,0x2f13055c(789775708)
   1238  * 0x2f130524 :  jal      0x2f12d0c4(789762244)       # call TEMPLATE_INVOKE_METHOD_NO_OPT
   1239  * 0x2f130528 :  nop
   1240  */
   1241 static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
   1242                                    int methodIndex,
   1243                                    MipsLIR *retChainingCell,
   1244                                    MipsLIR *predChainingCell,
   1245                                    MipsLIR *pcrLabel)
   1246 {
   1247     /*
   1248      * Note: all Dalvik register state should be flushed to
    1249      * memory by this point, so register usage restrictions no
   1250      * longer apply.  Lock temps to prevent them from being
   1251      * allocated by utility routines.
   1252      */
   1253     dvmCompilerLockAllTemps(cUnit);
   1254 
   1255     /*
   1256      * For verbose printing, store the method pointer in operands[1] first as
   1257      * operands[0] will be clobbered in dvmCompilerMIR2LIR.
   1258      */
   1259     predChainingCell->operands[1] = (int) mir->meta.callsiteInfo->method;
   1260 
   1261     /* "this" is already left in r_A0 by genProcessArgs* */
   1262 
   1263     /* r4PC = dalvikCallsite */
   1264     loadConstant(cUnit, r4PC,
   1265                  (int) (cUnit->method->insns + mir->offset));
   1266 
   1267     /* r_A1 = &retChainingCell */
   1268     MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
   1269     addrRetChain->generic.target = (LIR *) retChainingCell;
   1270     addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
   1271     addrRetChain->generic.target = (LIR *) retChainingCell;
   1272 
   1273     /* r_A2 = &predictedChainingCell */
   1274     MipsLIR *predictedChainingCell = newLIR2(cUnit, kMipsLahi, r_A2, 0);
   1275     predictedChainingCell->generic.target = (LIR *) predChainingCell;
   1276     predictedChainingCell = newLIR3(cUnit, kMipsLalo, r_A2, r_A2, 0);
   1277     predictedChainingCell->generic.target = (LIR *) predChainingCell;
   1278 
   1279     genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   1280         TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
   1281         TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
   1282 
   1283     /* return through ra - jump to the chaining cell */
   1284     genUnconditionalBranch(cUnit, predChainingCell);
   1285 
   1286     /*
   1287      * null-check on "this" may have been eliminated, but we still need a PC-
   1288      * reconstruction label for stack overflow bailout.
   1289      */
   1290     if (pcrLabel == NULL) {
   1291         int dPC = (int) (cUnit->method->insns + mir->offset);
   1292         pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   1293         pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
   1294         pcrLabel->operands[0] = dPC;
   1295         pcrLabel->operands[1] = mir->offset;
   1296         /* Insert the place holder to the growable list */
   1297         dvmInsertGrowableList(&cUnit->pcReconstructionList,
   1298                               (intptr_t) pcrLabel);
   1299     }
   1300 
   1301     /* return through ra+8 - punt to the interpreter */
   1302     genUnconditionalBranch(cUnit, pcrLabel);
   1303 
   1304     /*
   1305      * return through ra+16 - fully resolve the callee method.
   1306      * r_A1 <- count
   1307      * r_A2 <- &predictedChainCell
   1308      * r_A3 <- this->class
   1309      * r4 <- dPC
   1310      * r_S4 <- this->class->vtable
   1311      */
   1312 
   1313     /* r_A0 <- calleeMethod */
   1314     loadWordDisp(cUnit, r_S4, methodIndex * 4, r_A0);
   1315 
   1316     /* Check if rechain limit is reached */
   1317     MipsLIR *bypassRechaining = opCompareBranch(cUnit, kMipsBgtz, r_A1, -1);
   1318 
   1319     LOAD_FUNC_ADDR(cUnit, r_T9, (int) dvmJitToPatchPredictedChain);
   1320 
   1321     genRegCopy(cUnit, r_A1, rSELF);
   1322 
   1323     /*
   1324      * r_A0 = calleeMethod
   1325      * r_A2 = &predictedChainingCell
   1326      * r_A3 = class
   1327      *
   1328      * &returnChainingCell has already been loaded into r_A1, but it is
   1329      * not needed for patching the chaining cell and will be clobbered by
   1330      * the call, so it is reconstructed again afterwards.
   1331      */
   1332     opReg(cUnit, kOpBlx, r_T9);
   1333     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   1334     newLIR2(cUnit, kMipsMove, r_A0, r_V0);
   1335 
   1336     /* r_A1 = &retChainingCell */
   1337     addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
   1338     addrRetChain->generic.target = (LIR *) retChainingCell;
   1339     bypassRechaining->generic.target = (LIR *) addrRetChain;
   1340     addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
   1341     addrRetChain->generic.target = (LIR *) retChainingCell;
   1342 
   1343     /*
   1344      * r_A0 = calleeMethod,
   1345      * r_A1 = &ChainingCell,
   1346      * r4PC = callsiteDPC,
   1347      */
   1348     genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   1349         TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
   1350         TEMPLATE_INVOKE_METHOD_NO_OPT);
   1351 #if defined(WITH_JIT_TUNING)
   1352     gDvmJit.invokePolymorphic++;
   1353 #endif
   1354     /* Handle exceptions using the interpreter */
   1355     genTrap(cUnit, mir->offset, pcrLabel);
   1356 }
   1357 
   1358 /* "this" pointer is already in r_A0 */
   1359 static void genInvokeVirtualWholeMethod(CompilationUnit *cUnit,
   1360                                         MIR *mir,
   1361                                         void *calleeAddr,
   1362                                         MipsLIR *retChainingCell)
   1363 {
   1364     CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
   1365     dvmCompilerLockAllTemps(cUnit);
   1366 
   1367     loadClassPointer(cUnit, r_A1, (int) callsiteInfo);
   1368 
   1369     loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A2);
   1370     /*
   1371      * Branch to the slow path if the classes are not equal.  The branch
   1372      * target is set below, once the slow-path label (where the
   1373      * non-optimized invoke is generated) has been created.
   1374      */
   1375     MipsLIR *classCheck = opCompareBranch(cUnit, kMipsBne, r_A1, r_A2);
   1376 
   1377     /* a0 = the Dalvik PC of the callsite */
   1378     loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
   1379 
   1380     newLIR1(cUnit, kMipsJal, (int) calleeAddr);
   1381     genUnconditionalBranch(cUnit, retChainingCell);
   1382 
   1383     /* Target of slow path */
   1384     MipsLIR *slowPathLabel = newLIR0(cUnit, kMipsPseudoTargetLabel);
   1385 
   1386     slowPathLabel->defMask = ENCODE_ALL;
   1387     classCheck->generic.target = (LIR *) slowPathLabel;
   1388 
   1389     // FIXME
   1390     cUnit->printMe = true;
   1391 }
   1392 
   1393 static void genInvokeSingletonWholeMethod(CompilationUnit *cUnit,
   1394                                           MIR *mir,
   1395                                           void *calleeAddr,
   1396                                           MipsLIR *retChainingCell)
   1397 {
   1398     /* a0 = the Dalvik PC of the callsite */
   1399     loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
   1400 
   1401     newLIR1(cUnit, kMipsJal, (int) calleeAddr);
   1402     genUnconditionalBranch(cUnit, retChainingCell);
   1403 
   1404     // FIXME
   1405     cUnit->printMe = true;
   1406 }
   1407 
   1408 /* Generate a branch to go back to the interpreter */
   1409 static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset)
   1410 {
   1411     /* a0 = dalvik pc */
   1412     dvmCompilerFlushAllRegs(cUnit);
   1413     loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + offset));
   1414 #if 0 /* MIPSTODO temporary workaround for unaligned access on sigma hardware;
   1415              this can be removed when we're no longer punting to genInterpSingleStep
   1416              for opcodes that haven't been activated yet */
   1417     loadWordDisp(cUnit, r_A0, offsetof(Object, clazz), r_A3);
   1418 #endif
   1419     loadWordDisp(cUnit, rSELF, offsetof(Thread,
   1420                  jitToInterpEntries.dvmJitToInterpPunt), r_A1);
   1421 
   1422     opReg(cUnit, kOpBlx, r_A1);
   1423 }
   1424 
   1425 /*
   1426  * Attempt to single step one instruction using the interpreter and return
   1427  * to the compiled code for the next Dalvik instruction
   1428  */
   1429 static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
   1430 {
   1431     int flags = dexGetFlagsFromOpcode(mir->dalvikInsn.opcode);
   1432     int flagsToCheck = kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn;
   1433 
   1434     // Single stepping is considered a loop mode breaker
   1435     if (cUnit->jitMode == kJitLoop) {
   1436         cUnit->quitLoopMode = true;
   1437         return;
   1438     }
   1439 
   1440     //If already optimized out, just ignore
   1441     if (mir->dalvikInsn.opcode == OP_NOP)
   1442         return;
   1443 
   1444     //Ugly, but necessary.  Flush all Dalvik regs so Interp can find them
   1445     dvmCompilerFlushAllRegs(cUnit);
   1446 
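            /*
             * Single-stepping resumes at the following Dalvik instruction, so it
             * needs a known "next" PC.  If this is the last instruction of the
             * trace, or if it can branch, switch, or return, punt to the
             * interpreter instead.
             */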
   1447     if ((mir->next == NULL) || (flags & flagsToCheck)) {
   1448        genPuntToInterp(cUnit, mir->offset);
   1449        return;
   1450     }
   1451     int entryAddr = offsetof(Thread,
   1452                              jitToInterpEntries.dvmJitToInterpSingleStep);
   1453     loadWordDisp(cUnit, rSELF, entryAddr, r_A2);
   1454     /* a0 = dalvik pc */
   1455     loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
   1456     /* a1 = dalvik pc of following instruction */
   1457     loadConstant(cUnit, r_A1, (int) (cUnit->method->insns + mir->next->offset));
   1458     opReg(cUnit, kOpBlx, r_A2);
   1459 }
   1460 
   1461 /*
   1462  * To prevent a thread in a monitor wait from blocking the Jit from
   1463  * resetting the code cache, heavyweight monitor lock operations will
   1464  * not be allowed to return to an existing translation.  Instead, we will
   1465  * handle them by branching to a handler, which will in turn call the
   1466  * runtime lock routine and then branch directly back to the
   1467  * interpreter main loop.  Given the high cost of the heavyweight
   1468  * lock operation, this additional cost should be slight (especially when
   1469  * considering that we expect the vast majority of lock operations to
   1470  * use the fast-path thin lock bypass).
   1471  */
   1472 static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir)
   1473 {
   1474     bool isEnter = (mir->dalvikInsn.opcode == OP_MONITOR_ENTER);
   1475     genExportPC(cUnit, mir);
   1476     dvmCompilerFlushAllRegs(cUnit);   /* Send everything to home location */
   1477     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   1478     loadValueDirectFixed(cUnit, rlSrc, r_A1);
   1479     genRegCopy(cUnit, r_A0, rSELF);
   1480     genNullCheck(cUnit, rlSrc.sRegLow, r_A1, mir->offset, NULL);
   1481     if (isEnter) {
   1482         /* Get dPC of next insn */
   1483         loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
   1484                  dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
   1485         genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
   1486     } else {
   1487         LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmUnlockObject);
   1488         /* Do the call */
   1489         opReg(cUnit, kOpBlx, r_T9);
   1490         newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   1491         /* Did we throw? */
   1492         MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   1493         loadConstant(cUnit, r_A0,
   1494                      (int) (cUnit->method->insns + mir->offset +
   1495                      dexGetWidthFromOpcode(OP_MONITOR_EXIT)));
   1496         genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   1497         MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   1498         target->defMask = ENCODE_ALL;
   1499         branchOver->generic.target = (LIR *) target;
   1500         dvmCompilerClobberCallRegs(cUnit);
   1501     }
   1502 }
   1503 /*#endif*/
   1504 
   1505 /*
   1506  * Fetch self->interpBreak.ctl.breakFlags.  If the breakFlags are non-zero,
   1507  * punt to the interpreter.
   1508  */
   1509 static void genSuspendPoll(CompilationUnit *cUnit, MIR *mir)
   1510 {
   1511     int rTemp = dvmCompilerAllocTemp(cUnit);
   1512     MipsLIR *ld;
   1513     ld = loadBaseDisp(cUnit, NULL, rSELF,
   1514                       offsetof(Thread, interpBreak.ctl.breakFlags),
   1515                       rTemp, kUnsignedByte, INVALID_SREG);
   1516     setMemRefType(ld, true /* isLoad */, kMustNotAlias);
   1517     genRegImmCheck(cUnit, kMipsCondNe, rTemp, 0, mir->offset, NULL);
   1518 }
   1519 
   1520 /*
   1521  * The following are the first-level codegen routines that analyze the format
   1522  * of each bytecode and then either dispatch to special-purpose codegen routines
   1523  * or produce the corresponding MIPS instructions directly.
   1524  */
   1525 
   1526 static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir,
   1527                                        BasicBlock *bb, MipsLIR *labelList)
   1528 {
   1529     /* backward branch? */
   1530     bool backwardBranch = (bb->taken->startOffset <= mir->offset);
   1531 
   1532     if (backwardBranch &&
   1533         (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
   1534         genSuspendPoll(cUnit, mir);
   1535     }
   1536 
   1537     int numPredecessors = dvmCountSetBits(bb->taken->predecessors);
   1538     /*
   1539      * Things could be hoisted out of the taken block into the predecessor, so
   1540      * only fall through into it when this block is its sole predecessor (i.e., dominates it).
   1541      */
   1542     if (numPredecessors == 1 && bb->taken->visited == false &&
   1543         bb->taken->blockType == kDalvikByteCode) {
   1544         cUnit->nextCodegenBlock = bb->taken;
   1545     } else {
   1546         /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */
   1547         genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
   1548     }
   1549     return false;
   1550 }
   1551 
   1552 static bool handleFmt10x(CompilationUnit *cUnit, MIR *mir)
   1553 {
   1554     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   1555     if ((dalvikOpcode >= OP_UNUSED_3E) && (dalvikOpcode <= OP_UNUSED_43)) {
   1556         ALOGE("Codegen: got unused opcode %#x",dalvikOpcode);
   1557         return true;
   1558     }
   1559     switch (dalvikOpcode) {
   1560         case OP_RETURN_VOID_BARRIER:
   1561             dvmCompilerGenMemBarrier(cUnit, 0);
   1562             // Intentional fallthrough
   1563         case OP_RETURN_VOID:
   1564             genReturnCommon(cUnit,mir);
   1565             break;
   1566         case OP_UNUSED_73:
   1567         case OP_UNUSED_79:
   1568         case OP_UNUSED_7A:
   1569         case OP_UNUSED_FF:
   1570             ALOGE("Codegen: got unused opcode %#x",dalvikOpcode);
   1571             return true;
   1572         case OP_NOP:
   1573             break;
   1574         default:
   1575             return true;
   1576     }
   1577     return false;
   1578 }
   1579 
   1580 static bool handleFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir)
   1581 {
   1582     RegLocation rlDest;
   1583     RegLocation rlResult;
   1584     if (mir->ssaRep->numDefs == 2) {
   1585         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   1586     } else {
   1587         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1588     }
   1589 
   1590     switch (mir->dalvikInsn.opcode) {
   1591         case OP_CONST:
   1592         case OP_CONST_4: {
   1593             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
   1594             loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
   1595             storeValue(cUnit, rlDest, rlResult);
   1596             break;
   1597         }
   1598         case OP_CONST_WIDE_32: {
   1599             //TUNING: single routine to load constant pair to support doubles
   1600             //TUNING: load 0/-1 separately to avoid load dependency
   1601             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   1602             loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
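                    /* Sign-extend the low word into the high word (asr 31 yields 0 or -1) */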
   1603             opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
   1604                         rlResult.lowReg, 31);
   1605             storeValueWide(cUnit, rlDest, rlResult);
   1606             break;
   1607         }
   1608         default:
   1609             return true;
   1610     }
   1611     return false;
   1612 }
   1613 
   1614 static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir)
   1615 {
   1616     RegLocation rlDest;
   1617     RegLocation rlResult;
   1618     if (mir->ssaRep->numDefs == 2) {
   1619         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   1620     } else {
   1621         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1622     }
   1623     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
   1624 
   1625     switch (mir->dalvikInsn.opcode) {
   1626         case OP_CONST_HIGH16: {
   1627             loadConstantNoClobber(cUnit, rlResult.lowReg,
   1628                                   mir->dalvikInsn.vB << 16);
   1629             storeValue(cUnit, rlDest, rlResult);
   1630             break;
   1631         }
   1632         case OP_CONST_WIDE_HIGH16: {
   1633             loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
   1634                                   0, mir->dalvikInsn.vB << 16);
   1635             storeValueWide(cUnit, rlDest, rlResult);
   1636             break;
   1637         }
   1638         default:
   1639             return true;
   1640     }
   1641     return false;
   1642 }
   1643 
   1644 static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir)
   1645 {
   1646     /* For OP_THROW_VERIFICATION_ERROR */
   1647     genInterpSingleStep(cUnit, mir);
   1648     return false;
   1649 }
   1650 
   1651 static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
   1652 {
   1653     RegLocation rlResult;
   1654     RegLocation rlDest;
   1655     RegLocation rlSrc;
   1656 
   1657     switch (mir->dalvikInsn.opcode) {
   1658         case OP_CONST_STRING_JUMBO:
   1659         case OP_CONST_STRING: {
   1660             void *strPtr = (void*)
   1661               (cUnit->method->clazz->pDvmDex->pResStrings[mir->dalvikInsn.vB]);
   1662 
   1663             if (strPtr == NULL) {
   1664                 BAIL_LOOP_COMPILATION();
   1665                 ALOGE("Unexpected null string");
   1666                 dvmAbort();
   1667             }
   1668 
   1669             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1670             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   1671             loadConstantNoClobber(cUnit, rlResult.lowReg, (int) strPtr );
   1672             storeValue(cUnit, rlDest, rlResult);
   1673             break;
   1674         }
   1675         case OP_CONST_CLASS: {
   1676             void *classPtr = (void*)
   1677               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
   1678 
   1679             if (classPtr == NULL) {
   1680                 BAIL_LOOP_COMPILATION();
   1681                 ALOGE("Unexpected null class");
   1682                 dvmAbort();
   1683             }
   1684 
   1685             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1686             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   1687             loadConstantNoClobber(cUnit, rlResult.lowReg, (int) classPtr );
   1688             storeValue(cUnit, rlDest, rlResult);
   1689             break;
   1690         }
   1691         case OP_SGET:
   1692         case OP_SGET_VOLATILE:
   1693         case OP_SGET_OBJECT:
   1694         case OP_SGET_OBJECT_VOLATILE:
   1695         case OP_SGET_BOOLEAN:
   1696         case OP_SGET_CHAR:
   1697         case OP_SGET_BYTE:
   1698         case OP_SGET_SHORT: {
   1699             int valOffset = OFFSETOF_MEMBER(StaticField, value);
   1700             int tReg = dvmCompilerAllocTemp(cUnit);
   1701             bool isVolatile;
   1702             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
   1703                 mir->meta.calleeMethod : cUnit->method;
   1704             void *fieldPtr = (void*)
   1705               (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
   1706 
   1707             if (fieldPtr == NULL) {
   1708                 BAIL_LOOP_COMPILATION();
   1709                 ALOGE("Unexpected null static field");
   1710                 dvmAbort();
   1711             }
   1712 
   1713             /*
   1714              * On SMP systems, Dalvik opcodes found to be referencing
   1715              * volatile fields are rewritten to their _VOLATILE variant.
   1716              * However, this does not happen on non-SMP systems. The JIT
   1717              * still needs to know about volatility to avoid unsafe
   1718              * optimizations so we determine volatility based on either
   1719              * the opcode or the field access flags.
   1720              */
   1721 #if ANDROID_SMP != 0
   1722             Opcode opcode = mir->dalvikInsn.opcode;
   1723             isVolatile = (opcode == OP_SGET_VOLATILE) ||
   1724                          (opcode == OP_SGET_OBJECT_VOLATILE);
   1725             assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
   1726 #else
   1727             isVolatile = dvmIsVolatileField((Field *) fieldPtr);
   1728 #endif
   1729 
   1730             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1731             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
   1732             loadConstant(cUnit, tReg,  (int) fieldPtr + valOffset);
   1733 
   1734             if (isVolatile) {
   1735                 dvmCompilerGenMemBarrier(cUnit, 0);
   1736             }
   1737             HEAP_ACCESS_SHADOW(true);
   1738             loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);
   1739             HEAP_ACCESS_SHADOW(false);
   1740 
   1741             storeValue(cUnit, rlDest, rlResult);
   1742             break;
   1743         }
   1744         case OP_SGET_WIDE: {
   1745             int valOffset = OFFSETOF_MEMBER(StaticField, value);
   1746             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
   1747                 mir->meta.calleeMethod : cUnit->method;
   1748             void *fieldPtr = (void*)
   1749               (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
   1750 
   1751             if (fieldPtr == NULL) {
   1752                 BAIL_LOOP_COMPILATION();
   1753                 ALOGE("Unexpected null static field");
   1754                 dvmAbort();
   1755             }
   1756 
   1757             int tReg = dvmCompilerAllocTemp(cUnit);
   1758             rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   1759             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
   1760             loadConstant(cUnit, tReg,  (int) fieldPtr + valOffset);
   1761 
   1762             HEAP_ACCESS_SHADOW(true);
   1763             loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);
   1764             HEAP_ACCESS_SHADOW(false);
   1765 
   1766             storeValueWide(cUnit, rlDest, rlResult);
   1767             break;
   1768         }
   1769         case OP_SPUT:
   1770         case OP_SPUT_VOLATILE:
   1771         case OP_SPUT_OBJECT:
   1772         case OP_SPUT_OBJECT_VOLATILE:
   1773         case OP_SPUT_BOOLEAN:
   1774         case OP_SPUT_CHAR:
   1775         case OP_SPUT_BYTE:
   1776         case OP_SPUT_SHORT: {
   1777             int valOffset = OFFSETOF_MEMBER(StaticField, value);
   1778             int tReg = dvmCompilerAllocTemp(cUnit);
   1779             int objHead = 0;
   1780             bool isVolatile;
   1781             bool isSputObject;
   1782             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
   1783                 mir->meta.calleeMethod : cUnit->method;
   1784             void *fieldPtr = (void*)
   1785               (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
   1786             Opcode opcode = mir->dalvikInsn.opcode;
   1787 
   1788             if (fieldPtr == NULL) {
   1789                 BAIL_LOOP_COMPILATION();
   1790                 ALOGE("Unexpected null static field");
   1791                 dvmAbort();
   1792             }
   1793 
   1794 #if ANDROID_SMP != 0
   1795             isVolatile = (opcode == OP_SPUT_VOLATILE) ||
   1796                          (opcode == OP_SPUT_OBJECT_VOLATILE);
   1797             assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
   1798 #else
   1799             isVolatile = dvmIsVolatileField((Field *) fieldPtr);
   1800 #endif
   1801 
   1802             isSputObject = (opcode == OP_SPUT_OBJECT) ||
   1803                            (opcode == OP_SPUT_OBJECT_VOLATILE);
   1804 
   1805             rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   1806             rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
   1807             loadConstant(cUnit, tReg,  (int) fieldPtr);
   1808             if (isSputObject) {
   1809                 objHead = dvmCompilerAllocTemp(cUnit);
   1810                 loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
   1811             }
   1812             if (isVolatile) {
   1813                 dvmCompilerGenMemBarrier(cUnit, 0);
   1814             }
   1815             HEAP_ACCESS_SHADOW(true);
   1816             storeWordDisp(cUnit, tReg, valOffset ,rlSrc.lowReg);
   1817             dvmCompilerFreeTemp(cUnit, tReg);
   1818             HEAP_ACCESS_SHADOW(false);
   1819             if (isVolatile) {
   1820                 dvmCompilerGenMemBarrier(cUnit, 0);
   1821             }
   1822             if (isSputObject) {
   1823                 /* NOTE: marking card based on sfield->clazz */
   1824                 markCard(cUnit, rlSrc.lowReg, objHead);
   1825                 dvmCompilerFreeTemp(cUnit, objHead);
   1826             }
   1827 
   1828             break;
   1829         }
   1830         case OP_SPUT_WIDE: {
   1831             int tReg = dvmCompilerAllocTemp(cUnit);
   1832             int valOffset = OFFSETOF_MEMBER(StaticField, value);
   1833             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
   1834                 mir->meta.calleeMethod : cUnit->method;
   1835             void *fieldPtr = (void*)
   1836               (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
   1837 
   1838             if (fieldPtr == NULL) {
   1839                 BAIL_LOOP_COMPILATION();
   1840                 ALOGE("Unexpected null static field");
   1841                 dvmAbort();
   1842             }
   1843 
   1844             rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   1845             rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
   1846             loadConstant(cUnit, tReg,  (int) fieldPtr + valOffset);
   1847 
   1848             HEAP_ACCESS_SHADOW(true);
   1849             storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
   1850             HEAP_ACCESS_SHADOW(false);
   1851             break;
   1852         }
   1853         case OP_NEW_INSTANCE: {
   1854             /*
   1855              * Obey the calling convention and don't mess with the register
   1856              * usage.
   1857              */
   1858             ClassObject *classPtr = (ClassObject *)
   1859               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
   1860 
   1861             if (classPtr == NULL) {
   1862                 BAIL_LOOP_COMPILATION();
   1863                 ALOGE("Unexpected null class");
   1864                 dvmAbort();
   1865             }
   1866 
   1867             /*
   1868              * If it is going to throw, it should not make it into the trace to begin
   1869              * with.  However, the allocation might throw, so we need to genExportPC()
   1870              */
   1871             assert((classPtr->accessFlags & (ACC_INTERFACE|ACC_ABSTRACT)) == 0);
   1872             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   1873             genExportPC(cUnit, mir);
   1874             LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmAllocObject);
   1875             loadConstant(cUnit, r_A0, (int) classPtr);
   1876             loadConstant(cUnit, r_A1, ALLOC_DONT_TRACK);
   1877             opReg(cUnit, kOpBlx, r_T9);
   1878             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   1879             dvmCompilerClobberCallRegs(cUnit);
   1880             /* generate a branch over if allocation is successful */
   1881             MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   1882 
   1883             /*
   1884              * An OOM exception needs to be thrown here; the instruction cannot be re-executed
   1885              */
   1886             loadConstant(cUnit, r_A0,
   1887                          (int) (cUnit->method->insns + mir->offset));
   1888             genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   1889             /* noreturn */
   1890 
   1891             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   1892             target->defMask = ENCODE_ALL;
   1893             branchOver->generic.target = (LIR *) target;
   1894             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1895             rlResult = dvmCompilerGetReturn(cUnit);
   1896             storeValue(cUnit, rlDest, rlResult);
   1897             break;
   1898         }
   1899         case OP_CHECK_CAST: {
   1900             /*
   1901              * Obey the calling convention and don't mess with the register
   1902              * usage.
   1903              */
   1904             ClassObject *classPtr =
   1905               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
   1906             /*
   1907              * Note: It is possible that classPtr is NULL at this point,
   1908              * even though this instruction has been successfully interpreted.
   1909              * If the previous interpretation had a null source, the
   1910              * interpreter would not have bothered to resolve the clazz.
   1911              * Bail out to the interpreter in this case, and log it
   1912              * so that we can tell if it happens frequently.
   1913              */
   1914             if (classPtr == NULL) {
   1915                 BAIL_LOOP_COMPILATION();
   1916                 LOGVV("null clazz in OP_CHECK_CAST, single-stepping");
   1917                 genInterpSingleStep(cUnit, mir);
   1918                 return false;
   1919             }
   1920             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   1921             loadConstant(cUnit, r_A1, (int) classPtr );
   1922             rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   1923             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   1924             MipsLIR *branch1 = opCompareBranch(cUnit, kMipsBeqz, rlSrc.lowReg, -1);
   1925             /*
   1926              *  rlSrc.lowReg now holds the object reference.  Note that
   1927              *  it could have been allocated to r_A0, but we're okay so long
   1928              *  as we don't do anything destructive until r_A0 is loaded
   1929              *  with clazz.
   1930              */
   1931             /* Load object->clazz into r_A0 */
   1932             loadWordDisp(cUnit, rlSrc.lowReg, offsetof(Object, clazz), r_A0);
   1933             LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInstanceofNonTrivial);
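                    /* Trivially passes if object->clazz equals the cast class - skip the call */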
   1934             MipsLIR *branch2 = opCompareBranch(cUnit, kMipsBeq, r_A0, r_A1);
   1935             opReg(cUnit, kOpBlx, r_T9);
   1936             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   1937             dvmCompilerClobberCallRegs(cUnit);
   1938             /*
   1939              * If null, check cast failed - punt to the interpreter.  Because
   1940              * interpreter will be the one throwing, we don't need to
   1941              * genExportPC() here.
   1942              */
   1943             genRegCopy(cUnit, r_A0, r_V0);
   1944             genZeroCheck(cUnit, r_V0, mir->offset, NULL);
   1945             /* check cast passed - branch target here */
   1946             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   1947             target->defMask = ENCODE_ALL;
   1948             branch1->generic.target = (LIR *)target;
   1949             branch2->generic.target = (LIR *)target;
   1950             break;
   1951         }
   1952         case OP_SGET_WIDE_VOLATILE:
   1953         case OP_SPUT_WIDE_VOLATILE:
   1954             genInterpSingleStep(cUnit, mir);
   1955             break;
   1956         default:
   1957             return true;
   1958     }
   1959     return false;
   1960 }
   1961 
   1962 static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
   1963 {
   1964     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   1965     RegLocation rlResult;
   1966     switch (dalvikOpcode) {
   1967         case OP_MOVE_EXCEPTION: {
   1968             int exOffset = offsetof(Thread, exception);
   1969             int resetReg = dvmCompilerAllocTemp(cUnit);
   1970             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1971             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   1972             loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
   1973             loadConstant(cUnit, resetReg, 0);
   1974             storeWordDisp(cUnit, rSELF, exOffset, resetReg);
   1975             storeValue(cUnit, rlDest, rlResult);
   1976            break;
   1977         }
   1978         case OP_MOVE_RESULT:
   1979         case OP_MOVE_RESULT_OBJECT: {
   1980             /* An inlined move result is effectively no-op */
   1981             if (mir->OptimizationFlags & MIR_INLINED)
   1982                 break;
   1983             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   1984             RegLocation rlSrc = LOC_DALVIK_RETURN_VAL;
   1985             rlSrc.fp = rlDest.fp;
   1986             storeValue(cUnit, rlDest, rlSrc);
   1987             break;
   1988         }
   1989         case OP_MOVE_RESULT_WIDE: {
   1990             /* An inlined move result is effectively no-op */
   1991             if (mir->OptimizationFlags & MIR_INLINED)
   1992                 break;
   1993             RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   1994             RegLocation rlSrc = LOC_DALVIK_RETURN_VAL_WIDE;
   1995             rlSrc.fp = rlDest.fp;
   1996             storeValueWide(cUnit, rlDest, rlSrc);
   1997             break;
   1998         }
   1999         case OP_RETURN_WIDE: {
   2000             RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   2001             RegLocation rlDest = LOC_DALVIK_RETURN_VAL_WIDE;
   2002             rlDest.fp = rlSrc.fp;
   2003             storeValueWide(cUnit, rlDest, rlSrc);
   2004             genReturnCommon(cUnit,mir);
   2005             break;
   2006         }
   2007         case OP_RETURN:
   2008         case OP_RETURN_OBJECT: {
   2009             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2010             RegLocation rlDest = LOC_DALVIK_RETURN_VAL;
   2011             rlDest.fp = rlSrc.fp;
   2012             storeValue(cUnit, rlDest, rlSrc);
   2013             genReturnCommon(cUnit, mir);
   2014             break;
   2015         }
   2016         case OP_MONITOR_EXIT:
   2017         case OP_MONITOR_ENTER:
   2018             genMonitor(cUnit, mir);
   2019             break;
   2020         case OP_THROW:
   2021             genInterpSingleStep(cUnit, mir);
   2022             break;
   2023         default:
   2024             return true;
   2025     }
   2026     return false;
   2027 }
   2028 
   2029 static bool handleFmt12x(CompilationUnit *cUnit, MIR *mir)
   2030 {
   2031     Opcode opcode = mir->dalvikInsn.opcode;
   2032     RegLocation rlDest;
   2033     RegLocation rlSrc;
   2034     RegLocation rlResult;
   2035 
   2036     if ( (opcode >= OP_ADD_INT_2ADDR) && (opcode <= OP_REM_DOUBLE_2ADDR)) {
   2037         return genArithOp( cUnit, mir );
   2038     }
   2039 
   2040     if (mir->ssaRep->numUses == 2)
   2041         rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   2042     else
   2043         rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2044     if (mir->ssaRep->numDefs == 2)
   2045         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   2046     else
   2047         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2048 
   2049     switch (opcode) {
   2050         case OP_DOUBLE_TO_INT:
   2051         case OP_INT_TO_FLOAT:
   2052         case OP_FLOAT_TO_INT:
   2053         case OP_DOUBLE_TO_FLOAT:
   2054         case OP_FLOAT_TO_DOUBLE:
   2055         case OP_INT_TO_DOUBLE:
   2056         case OP_FLOAT_TO_LONG:
   2057         case OP_LONG_TO_FLOAT:
   2058         case OP_DOUBLE_TO_LONG:
   2059         case OP_LONG_TO_DOUBLE:
   2060             return genConversion(cUnit, mir);
   2061         case OP_NEG_INT:
   2062         case OP_NOT_INT:
   2063             return genArithOpInt(cUnit, mir, rlDest, rlSrc, rlSrc);
   2064         case OP_NEG_LONG:
   2065         case OP_NOT_LONG:
   2066             return genArithOpLong(cUnit, mir, rlDest, rlSrc, rlSrc);
   2067         case OP_NEG_FLOAT:
   2068             return genArithOpFloat(cUnit, mir, rlDest, rlSrc, rlSrc);
   2069         case OP_NEG_DOUBLE:
   2070             return genArithOpDouble(cUnit, mir, rlDest, rlSrc, rlSrc);
   2071         case OP_MOVE_WIDE:
   2072             storeValueWide(cUnit, rlDest, rlSrc);
   2073             break;
   2074         case OP_INT_TO_LONG:
   2075             rlSrc = dvmCompilerUpdateLoc(cUnit, rlSrc);
   2076             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2077             //TUNING: shouldn't loadValueDirect already check for phys reg?
   2078             if (rlSrc.location == kLocPhysReg) {
   2079                 genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
   2080             } else {
   2081                 loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
   2082             }
   2083             opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
   2084                         rlResult.lowReg, 31);
   2085             storeValueWide(cUnit, rlDest, rlResult);
   2086             break;
   2087         case OP_LONG_TO_INT:
   2088             rlSrc = dvmCompilerUpdateLocWide(cUnit, rlSrc);
   2089             rlSrc = dvmCompilerWideToNarrow(cUnit, rlSrc);
   2090             // Intentional fallthrough
   2091         case OP_MOVE:
   2092         case OP_MOVE_OBJECT:
   2093             storeValue(cUnit, rlDest, rlSrc);
   2094             break;
   2095         case OP_INT_TO_BYTE:
   2096             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2097             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2098             opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc.lowReg);
   2099             storeValue(cUnit, rlDest, rlResult);
   2100             break;
   2101         case OP_INT_TO_SHORT:
   2102             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2103             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2104             opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc.lowReg);
   2105             storeValue(cUnit, rlDest, rlResult);
   2106             break;
   2107         case OP_INT_TO_CHAR:
   2108             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2109             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2110             opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc.lowReg);
   2111             storeValue(cUnit, rlDest, rlResult);
   2112             break;
   2113         case OP_ARRAY_LENGTH: {
   2114             int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
   2115             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2116             genNullCheck(cUnit, rlSrc.sRegLow, rlSrc.lowReg,
   2117                          mir->offset, NULL);
   2118             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2119             loadWordDisp(cUnit, rlSrc.lowReg, lenOffset,
   2120                          rlResult.lowReg);
   2121             storeValue(cUnit, rlDest, rlResult);
   2122             break;
   2123         }
   2124         default:
   2125             return true;
   2126     }
   2127     return false;
   2128 }
   2129 
   2130 static bool handleFmt21s(CompilationUnit *cUnit, MIR *mir)
   2131 {
   2132     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2133     RegLocation rlDest;
   2134     RegLocation rlResult;
   2135     int BBBB = mir->dalvikInsn.vB;
   2136     if (dalvikOpcode == OP_CONST_WIDE_16) {
   2137         rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   2138         rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2139         loadConstantNoClobber(cUnit, rlResult.lowReg, BBBB);
   2140         //TUNING: do high separately to avoid load dependency
   2141         opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
   2142         storeValueWide(cUnit, rlDest, rlResult);
   2143     } else if (dalvikOpcode == OP_CONST_16) {
   2144         rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2145         rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
   2146         loadConstantNoClobber(cUnit, rlResult.lowReg, BBBB);
   2147         storeValue(cUnit, rlDest, rlResult);
   2148     } else
   2149         return true;
   2150     return false;
   2151 }
   2152 
   2153 /* Compare against zero */
   2154 static bool handleFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
   2155                          MipsLIR *labelList)
   2156 {
   2157     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2158     MipsOpCode opc = kMipsNop;
   2159     int rt = -1;
   2160     /* backward branch? */
   2161     bool backwardBranch = (bb->taken->startOffset <= mir->offset);
   2162 
   2163     if (backwardBranch &&
   2164         (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
   2165         genSuspendPoll(cUnit, mir);
   2166     }
   2167 
   2168     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2169     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2170 
   2171     switch (dalvikOpcode) {
   2172         case OP_IF_EQZ:
   2173             opc = kMipsBeqz;
   2174             break;
   2175         case OP_IF_NEZ:
   2176             opc = kMipsBne;
   2177             rt = r_ZERO;
   2178             break;
   2179         case OP_IF_LTZ:
   2180             opc = kMipsBltz;
   2181             break;
   2182         case OP_IF_GEZ:
   2183             opc = kMipsBgez;
   2184             break;
   2185         case OP_IF_GTZ:
   2186             opc = kMipsBgtz;
   2187             break;
   2188         case OP_IF_LEZ:
   2189             opc = kMipsBlez;
   2190             break;
   2191         default:
   2192             ALOGE("Unexpected opcode (%d) for Fmt21t", dalvikOpcode);
   2193             dvmCompilerAbort(cUnit);
   2194     }
   2195     genConditionalBranchMips(cUnit, opc, rlSrc.lowReg, rt, &labelList[bb->taken->id]);
   2196     /* This most likely will be optimized away in a later phase */
   2197     genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
   2198     return false;
   2199 }
   2200 
   2201 static bool isPowerOfTwo(int x)
   2202 {
   2203     return (x & (x - 1)) == 0;
   2204 }
   2205 
   2206 // Returns true if no more than two bits are set in 'x'.
   2207 static bool isPopCountLE2(unsigned int x)
   2208 {
   2209     x &= x - 1;
   2210     return (x & (x - 1)) == 0;
   2211 }
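
        /*
         * Both of the helpers above rely on x & (x - 1) clearing the lowest set
         * bit of x.  For example, isPopCountLE2(0b0110) clears one bit to get
         * 0b0100, which passes the final power-of-two test, so it returns true;
         * isPopCountLE2(0b0111) leaves 0b0110, which is not a power of two, so
         * it returns false.  Note that both helpers also return true for x == 0;
         * callers rule that out by requiring lit >= 2.
         */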
   2212 
   2213 // Returns the index of the lowest set bit in 'x'.
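        // Precondition: x != 0; the loops below would not terminate for x == 0.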
   2214 static int lowestSetBit(unsigned int x) {
   2215     int bit_posn = 0;
   2216     while ((x & 0xf) == 0) {
   2217         bit_posn += 4;
   2218         x >>= 4;
   2219     }
   2220     while ((x & 1) == 0) {
   2221         bit_posn++;
   2222         x >>= 1;
   2223     }
   2224     return bit_posn;
   2225 }
   2226 
   2227 // Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
   2228 // and store the result in 'rlDest'.
   2229 static bool handleEasyDivide(CompilationUnit *cUnit, Opcode dalvikOpcode,
   2230                              RegLocation rlSrc, RegLocation rlDest, int lit)
   2231 {
   2232     if (lit < 2 || !isPowerOfTwo(lit)) {
   2233         return false;
   2234     }
   2235     int k = lowestSetBit(lit);
   2236     if (k >= 30) {
   2237         // Avoid special cases.
   2238         return false;
   2239     }
   2240     bool div = (dalvikOpcode == OP_DIV_INT_LIT8 || dalvikOpcode == OP_DIV_INT_LIT16);
   2241     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2242     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2243     if (div) {
   2244         int tReg = dvmCompilerAllocTemp(cUnit);
   2245         if (lit == 2) {
   2246             // Division by 2 is by far the most common division by constant.
   2247             opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
   2248             opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
   2249             opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
   2250         } else {
   2251             opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
   2252             opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
   2253             opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
   2254             opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
   2255         }
   2256     } else {
   2257         int cReg = dvmCompilerAllocTemp(cUnit);
   2258         loadConstant(cUnit, cReg, lit - 1);
   2259         int tReg1 = dvmCompilerAllocTemp(cUnit);
   2260         int tReg2 = dvmCompilerAllocTemp(cUnit);
   2261         if (lit == 2) {
   2262             opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
   2263             opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
   2264             opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
   2265             opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
   2266         } else {
   2267             opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
   2268             opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
   2269             opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
   2270             opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
   2271             opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
   2272         }
   2273     }
   2274     storeValue(cUnit, rlDest, rlResult);
   2275     return true;
   2276 }
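
        /*
         * Illustrative C equivalent (not emitted code) of the sequence generated
         * above for a power-of-two divisor lit == 1 << k, assuming an arithmetic
         * right shift for signed values, as the emitted MIPS sra provides:
         *
         *     int bias = (int)((unsigned int)(x >> 31) >> (32 - k));  // 0 or lit-1
         *     int quot = (x + bias) >> k;
         *     int rem  = ((x + bias) & (lit - 1)) - bias;
         *
         * The bias rounds negative dividends toward zero to match Dalvik
         * semantics, e.g. for x == -7, lit == 4 (k == 2): bias == 3,
         * quot == (-7 + 3) >> 2 == -1 (not -2), and rem == (-4 & 3) - 3 == -3.
         */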
   2277 
   2278 // Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
   2279 // and store the result in 'rlDest'.
   2280 static bool handleEasyMultiply(CompilationUnit *cUnit,
   2281                                RegLocation rlSrc, RegLocation rlDest, int lit)
   2282 {
   2283     // Can we simplify this multiplication?
   2284     bool powerOfTwo = false;
   2285     bool popCountLE2 = false;
   2286     bool powerOfTwoMinusOne = false;
   2287     if (lit < 2) {
   2288         // Avoid special cases.
   2289         return false;
   2290     } else if (isPowerOfTwo(lit)) {
   2291         powerOfTwo = true;
   2292     } else if (isPopCountLE2(lit)) {
   2293         popCountLE2 = true;
   2294     } else if (isPowerOfTwo(lit + 1)) {
   2295         powerOfTwoMinusOne = true;
   2296     } else {
   2297         return false;
   2298     }
   2299     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2300     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2301     if (powerOfTwo) {
   2302         // Shift.
   2303         opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
   2304                     lowestSetBit(lit));
   2305     } else if (popCountLE2) {
   2306         // Shift and add and shift.
   2307         int firstBit = lowestSetBit(lit);
   2308         int secondBit = lowestSetBit(lit ^ (1 << firstBit));
   2309         genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
   2310                                       firstBit, secondBit);
   2311     } else {
   2312         // Reverse subtract: (src << (shift + 1)) - src.
   2313         assert(powerOfTwoMinusOne);
   2314         // TODO: rsb dst, src, src lsl#lowestSetBit(lit + 1)
   2315         int tReg = dvmCompilerAllocTemp(cUnit);
   2316         opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
   2317         opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
   2318     }
   2319     storeValue(cUnit, rlDest, rlResult);
   2320     return true;
   2321 }
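
        /*
         * The strength reductions above amount to (conceptually):
         *   lit == 8  (power of two):          x * 8  => x << 3
         *   lit == 10 (two bits set):          x * 10 => (x << 3) + (x << 1)
         *   lit == 7  (power of two minus 1):  x * 7  => (x << 3) - x
         * The exact two-bit-set sequence is emitted by genMultiplyByTwoBitMultiplier.
         */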
   2322 
   2323 static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
   2324 {
   2325     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2326     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2327     RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2328     RegLocation rlResult;
   2329     int lit = mir->dalvikInsn.vC;
   2330     OpKind op = (OpKind)0;      /* Make gcc happy */
   2331     bool shiftOp = false;
   2332 
   2333     switch (dalvikOpcode) {
   2334         case OP_RSUB_INT_LIT8:
   2335         case OP_RSUB_INT: {
   2336             int tReg;
   2337             //TUNING: add support for use of Arm rsub op
   2338             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2339             tReg = dvmCompilerAllocTemp(cUnit);
   2340             loadConstant(cUnit, tReg, lit);
   2341             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2342             opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
   2343                         tReg, rlSrc.lowReg);
   2344             storeValue(cUnit, rlDest, rlResult);
   2345             return false;
   2346             break;
   2347         }
   2348 
   2349         case OP_ADD_INT_LIT8:
   2350         case OP_ADD_INT_LIT16:
   2351             op = kOpAdd;
   2352             break;
   2353         case OP_MUL_INT_LIT8:
   2354         case OP_MUL_INT_LIT16: {
   2355             if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
   2356                 return false;
   2357             }
   2358             op = kOpMul;
   2359             break;
   2360         }
   2361         case OP_AND_INT_LIT8:
   2362         case OP_AND_INT_LIT16:
   2363             op = kOpAnd;
   2364             break;
   2365         case OP_OR_INT_LIT8:
   2366         case OP_OR_INT_LIT16:
   2367             op = kOpOr;
   2368             break;
   2369         case OP_XOR_INT_LIT8:
   2370         case OP_XOR_INT_LIT16:
   2371             op = kOpXor;
   2372             break;
   2373         case OP_SHL_INT_LIT8:
   2374             lit &= 31;
   2375             shiftOp = true;
   2376             op = kOpLsl;
   2377             break;
   2378         case OP_SHR_INT_LIT8:
   2379             lit &= 31;
   2380             shiftOp = true;
   2381             op = kOpAsr;
   2382             break;
   2383         case OP_USHR_INT_LIT8:
   2384             lit &= 31;
   2385             shiftOp = true;
   2386             op = kOpLsr;
   2387             break;
   2388 
   2389         case OP_DIV_INT_LIT8:
   2390         case OP_DIV_INT_LIT16:
   2391         case OP_REM_INT_LIT8:
   2392         case OP_REM_INT_LIT16: {
   2393             if (lit == 0) {
   2394                 /* Let the interpreter deal with div by 0 */
   2395                 genInterpSingleStep(cUnit, mir);
   2396                 return false;
   2397             }
   2398             if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
   2399                 return false;
   2400             }
   2401 
   2402             MipsOpCode opc;
   2403             int divReg;
   2404 
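                    /* MIPS div leaves the quotient in LO and the remainder in HI */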
   2405             if ((dalvikOpcode == OP_DIV_INT_LIT8) ||
   2406                 (dalvikOpcode == OP_DIV_INT_LIT16)) {
   2407                 opc = kMipsMflo;
   2408                 divReg = r_LO;
   2409             } else {
   2410                 opc = kMipsMfhi;
   2411                 divReg = r_HI;
   2412             }
   2413 
   2414             rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2415             int tReg = dvmCompilerAllocTemp(cUnit);
   2416             newLIR3(cUnit, kMipsAddiu, tReg, r_ZERO, lit);
   2417             newLIR4(cUnit, kMipsDiv, r_HI, r_LO, rlSrc.lowReg, tReg);
   2418             rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2419             newLIR2(cUnit, opc, rlResult.lowReg, divReg);
   2420             dvmCompilerFreeTemp(cUnit, tReg);
   2421             storeValue(cUnit, rlDest, rlResult);
   2422             return false;
   2423             break;
   2424         }
   2425         default:
   2426             return true;
   2427     }
   2428     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   2429     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   2430     // Avoid shifts by literal 0 - not supported by the Thumb code this was ported from.  Change to a copy
   2431     if (shiftOp && (lit == 0)) {
   2432         genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
   2433     } else {
   2434         opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
   2435     }
   2436     storeValue(cUnit, rlDest, rlResult);
   2437     return false;
   2438 }
   2439 
   2440 static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
   2441 {
   2442     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2443     int fieldOffset = -1;
   2444     bool isVolatile = false;
   2445     switch (dalvikOpcode) {
   2446         /*
   2447          * Wide volatiles currently handled via single step.
   2448          * Add them here if generating in-line code.
   2449          *     case OP_IGET_WIDE_VOLATILE:
   2450          *     case OP_IPUT_WIDE_VOLATILE:
   2451          */
   2452         case OP_IGET_VOLATILE:
   2453         case OP_IGET_OBJECT_VOLATILE:
   2454         case OP_IPUT_VOLATILE:
   2455         case OP_IPUT_OBJECT_VOLATILE:
   2456 #if ANDROID_SMP != 0
   2457             isVolatile = true;
   2458         // NOTE: intentional fallthrough
   2459 #endif
   2460         case OP_IGET:
   2461         case OP_IGET_WIDE:
   2462         case OP_IGET_OBJECT:
   2463         case OP_IGET_BOOLEAN:
   2464         case OP_IGET_BYTE:
   2465         case OP_IGET_CHAR:
   2466         case OP_IGET_SHORT:
   2467         case OP_IPUT:
   2468         case OP_IPUT_WIDE:
   2469         case OP_IPUT_OBJECT:
   2470         case OP_IPUT_BOOLEAN:
   2471         case OP_IPUT_BYTE:
   2472         case OP_IPUT_CHAR:
   2473         case OP_IPUT_SHORT: {
   2474             const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
   2475                 mir->meta.calleeMethod : cUnit->method;
   2476             Field *fieldPtr =
   2477                 method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vC];
   2478 
   2479             if (fieldPtr == NULL) {
   2480                 BAIL_LOOP_COMPILATION();
   2481                 ALOGE("Unexpected null instance field");
   2482                 dvmAbort();
   2483             }
   2484 #if ANDROID_SMP != 0
   2485             assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
   2486 #else
   2487             isVolatile = dvmIsVolatileField((Field *) fieldPtr);
   2488 #endif
   2489             fieldOffset = ((InstField *)fieldPtr)->byteOffset;
   2490             break;
   2491         }
   2492         default:
   2493             break;
   2494     }
   2495 
   2496     switch (dalvikOpcode) {
   2497         case OP_NEW_ARRAY: {
   2498 #if 0 /* 080 triggers assert in Interp.c:1290 for out of memory exception.
   2499              I think the assert is in error and should be disabled. With
   2500              asserts disabled, 080 passes. */
   2501 genInterpSingleStep(cUnit, mir);
   2502 return false;
   2503 #endif
   2504             // Generates a call - use explicit registers
   2505             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2506             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2507             RegLocation rlResult;
   2508             void *classPtr = (void*)
   2509               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
   2510 
   2511             if (classPtr == NULL) {
   2512                 BAIL_LOOP_COMPILATION();
   2513                 ALOGE("Unexpected null class");
   2514                 dvmAbort();
   2515             }
   2516 
   2517             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   2518             genExportPC(cUnit, mir);
   2519             loadValueDirectFixed(cUnit, rlSrc, r_A1);   /* Len */
   2520             loadConstant(cUnit, r_A0, (int) classPtr );
   2521             LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmAllocArrayByClass);
   2522             /*
   2523              * "len < 0": bail to the interpreter to re-execute the
   2524              * instruction
   2525              */
   2526             genRegImmCheck(cUnit, kMipsCondMi, r_A1, 0, mir->offset, NULL);
   2527             loadConstant(cUnit, r_A2, ALLOC_DONT_TRACK);
   2528             opReg(cUnit, kOpBlx, r_T9);
   2529             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   2530             dvmCompilerClobberCallRegs(cUnit);
   2531             /* generate a branch over if allocation is successful */
   2532             MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   2533             /*
   2534              * OOM exception needs to be thrown here and cannot re-execute
   2535              * An OOM exception needs to be thrown here; the instruction cannot be re-executed
   2536             loadConstant(cUnit, r_A0,
   2537                          (int) (cUnit->method->insns + mir->offset));
   2538             genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   2539             /* noreturn */
   2540 
   2541             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   2542             target->defMask = ENCODE_ALL;
   2543             branchOver->generic.target = (LIR *) target;
   2544             rlResult = dvmCompilerGetReturn(cUnit);
   2545             storeValue(cUnit, rlDest, rlResult);
   2546             break;
   2547         }
   2548         case OP_INSTANCE_OF: {
   2549             // May generate a call - use explicit registers
   2550             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2551             RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2552             RegLocation rlResult;
   2553             ClassObject *classPtr =
   2554               (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
   2555             /*
   2556              * Note: It is possible that classPtr is NULL at this point,
   2557              * even though this instruction has been successfully interpreted.
   2558              * If the previous interpretation had a null source, the
   2559              * interpreter would not have bothered to resolve the clazz.
   2560              * Bail out to the interpreter in this case, and log it
   2561              * so that we can tell if it happens frequently.
   2562              */
   2563             if (classPtr == NULL) {
   2564                 BAIL_LOOP_COMPILATION();
   2565                 ALOGD("null clazz in OP_INSTANCE_OF, single-stepping");
   2566                 genInterpSingleStep(cUnit, mir);
   2567                 break;
   2568             }
   2569             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   2570             loadValueDirectFixed(cUnit, rlSrc, r_V0);  /* Ref */
   2571             loadConstant(cUnit, r_A2, (int) classPtr );
    2572             /* When the branch is taken r_V0 holds NULL, which can be stored directly */
   2573             MipsLIR *branch1 = opCompareBranch(cUnit, kMipsBeqz, r_V0, -1);
    2574             /* Load object->clazz into r_A1 */
   2575             loadWordDisp(cUnit, r_V0, offsetof(Object, clazz), r_A1);
   2576             /* r_A1 now contains object->clazz */
   2577             LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInstanceofNonTrivial);
   2578             loadConstant(cUnit, r_V0, 1);                /* Assume true */
   2579             MipsLIR *branch2 = opCompareBranch(cUnit, kMipsBeq, r_A1, r_A2);
   2580             genRegCopy(cUnit, r_A0, r_A1);
   2581             genRegCopy(cUnit, r_A1, r_A2);
   2582             opReg(cUnit, kOpBlx, r_T9);
   2583             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   2584             dvmCompilerClobberCallRegs(cUnit);
   2585             /* branch target here */
   2586             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   2587             target->defMask = ENCODE_ALL;
   2588             rlResult = dvmCompilerGetReturn(cUnit);
   2589             storeValue(cUnit, rlDest, rlResult);
   2590             branch1->generic.target = (LIR *)target;
   2591             branch2->generic.target = (LIR *)target;
   2592             break;
   2593         }
   2594         case OP_IGET_WIDE:
   2595             genIGetWide(cUnit, mir, fieldOffset);
   2596             break;
   2597         case OP_IGET_VOLATILE:
   2598         case OP_IGET_OBJECT_VOLATILE:
   2599         case OP_IGET:
   2600         case OP_IGET_OBJECT:
   2601         case OP_IGET_BOOLEAN:
   2602         case OP_IGET_BYTE:
   2603         case OP_IGET_CHAR:
   2604         case OP_IGET_SHORT:
   2605             genIGet(cUnit, mir, kWord, fieldOffset, isVolatile);
   2606             break;
   2607         case OP_IPUT_WIDE:
   2608             genIPutWide(cUnit, mir, fieldOffset);
   2609             break;
   2610         case OP_IPUT_VOLATILE:
   2611         case OP_IPUT:
   2612         case OP_IPUT_BOOLEAN:
   2613         case OP_IPUT_BYTE:
   2614         case OP_IPUT_CHAR:
   2615         case OP_IPUT_SHORT:
   2616             genIPut(cUnit, mir, kWord, fieldOffset, false, isVolatile);
   2617             break;
   2618         case OP_IPUT_OBJECT_VOLATILE:
   2619         case OP_IPUT_OBJECT:
   2620             genIPut(cUnit, mir, kWord, fieldOffset, true, isVolatile);
   2621             break;
   2622         case OP_IGET_WIDE_VOLATILE:
   2623         case OP_IPUT_WIDE_VOLATILE:
   2624             genInterpSingleStep(cUnit, mir);
   2625             break;
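                 // Illustrative note on the idiom below (not from the original code):
                 // kMipsSltu sets tReg to (0 < count), i.e. 1 when the string is
                 // non-empty, and xor-ing with 1 flips that into the desired
                 // (count == 0) boolean.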
   2626         default:
   2627             return true;
   2628     }
   2629     return false;
   2630 }
   2631 
   2632 static bool handleFmt22cs(CompilationUnit *cUnit, MIR *mir)
   2633 {
   2634     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2635     int fieldOffset =  mir->dalvikInsn.vC;
   2636     switch (dalvikOpcode) {
   2637         case OP_IGET_QUICK:
   2638         case OP_IGET_OBJECT_QUICK:
   2639             genIGet(cUnit, mir, kWord, fieldOffset, false);
   2640             break;
   2641         case OP_IPUT_QUICK:
   2642             genIPut(cUnit, mir, kWord, fieldOffset, false, false);
   2643             break;
   2644         case OP_IPUT_OBJECT_QUICK:
   2645             genIPut(cUnit, mir, kWord, fieldOffset, true, false);
   2646             break;
   2647         case OP_IGET_WIDE_QUICK:
   2648             genIGetWide(cUnit, mir, fieldOffset);
   2649             break;
   2650         case OP_IPUT_WIDE_QUICK:
   2651             genIPutWide(cUnit, mir, fieldOffset);
   2652             break;
   2653         default:
   2654             return true;
   2655     }
   2656     return false;
   2657 
   2658 }
   2659 
    2660 /* Compare two virtual registers and branch (if-eq/ne/lt/ge/gt/le vA, vB) */
   2661 static bool handleFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
   2662                          MipsLIR *labelList)
   2663 {
   2664     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2665     MipsConditionCode cond;
   2666     MipsOpCode opc = kMipsNop;
   2667     MipsLIR * test = NULL;
   2668     /* backward branch? */
   2669     bool backwardBranch = (bb->taken->startOffset <= mir->offset);
   2670 
   2671     if (backwardBranch &&
   2672         (gDvmJit.genSuspendPoll || cUnit->jitMode == kJitLoop)) {
   2673         genSuspendPoll(cUnit, mir);
   2674     }
   2675 
   2676     RegLocation rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
   2677     RegLocation rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
   2678     rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
   2679     rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
   2680     int reg1 = rlSrc1.lowReg;
   2681     int reg2 = rlSrc2.lowReg;
   2682     int tReg;
   2683 
   2684     switch (dalvikOpcode) {
   2685         case OP_IF_EQ:
   2686             opc = kMipsBeq;
   2687             break;
   2688         case OP_IF_NE:
   2689             opc = kMipsBne;
   2690             break;
   2691         case OP_IF_LT:
   2692             opc = kMipsBne;
   2693             tReg = dvmCompilerAllocTemp(cUnit);
   2694             test = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2);
   2695             reg1 = tReg;
   2696             reg2 = r_ZERO;
   2697             break;
   2698         case OP_IF_LE:
   2699             opc = kMipsBeqz;
   2700             tReg = dvmCompilerAllocTemp(cUnit);
   2701             test = newLIR3(cUnit, kMipsSlt, tReg, reg2, reg1);
   2702             reg1 = tReg;
   2703             reg2 = -1;
   2704             break;
   2705         case OP_IF_GT:
   2706             opc = kMipsBne;
   2707             tReg = dvmCompilerAllocTemp(cUnit);
   2708             test = newLIR3(cUnit, kMipsSlt, tReg, reg2, reg1);
   2709             reg1 = tReg;
   2710             reg2 = r_ZERO;
   2711             break;
   2712         case OP_IF_GE:
   2713             opc = kMipsBeqz;
   2714             tReg = dvmCompilerAllocTemp(cUnit);
   2715             test = newLIR3(cUnit, kMipsSlt, tReg, reg1, reg2);
   2716             reg1 = tReg;
   2717             reg2 = -1;
   2718             break;
   2719         default:
   2720             cond = (MipsConditionCode)0;
   2721             ALOGE("Unexpected opcode (%d) for Fmt22t", dalvikOpcode);
   2722             dvmCompilerAbort(cUnit);
   2723     }
   2724 
   2725     genConditionalBranchMips(cUnit, opc, reg1, reg2, &labelList[bb->taken->id]);
    2726     /* This most likely will be optimized away in a later phase */
   2727     genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
   2728     return false;
   2729 }
   2730 
   2731 static bool handleFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir)
   2732 {
   2733     Opcode opcode = mir->dalvikInsn.opcode;
   2734 
   2735     switch (opcode) {
   2736         case OP_MOVE_16:
   2737         case OP_MOVE_OBJECT_16:
   2738         case OP_MOVE_FROM16:
   2739         case OP_MOVE_OBJECT_FROM16: {
   2740             storeValue(cUnit, dvmCompilerGetDest(cUnit, mir, 0),
   2741                        dvmCompilerGetSrc(cUnit, mir, 0));
   2742             break;
   2743         }
   2744         case OP_MOVE_WIDE_16:
   2745         case OP_MOVE_WIDE_FROM16: {
   2746             storeValueWide(cUnit, dvmCompilerGetDestWide(cUnit, mir, 0, 1),
   2747                            dvmCompilerGetSrcWide(cUnit, mir, 0, 1));
   2748             break;
   2749         }
   2750         default:
   2751             return true;
   2752     }
   2753     return false;
   2754 }
   2755 
   2756 static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir)
   2757 {
   2758     Opcode opcode = mir->dalvikInsn.opcode;
   2759     RegLocation rlSrc1;
   2760     RegLocation rlSrc2;
   2761     RegLocation rlDest;
   2762 
   2763     if ((opcode >= OP_ADD_INT) && (opcode <= OP_REM_DOUBLE)) {
   2764         return genArithOp( cUnit, mir );
   2765     }
   2766 
   2767     /* APUTs have 3 sources and no targets */
   2768     if (mir->ssaRep->numDefs == 0) {
   2769         if (mir->ssaRep->numUses == 3) {
   2770             rlDest = dvmCompilerGetSrc(cUnit, mir, 0);
   2771             rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 1);
   2772             rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 2);
   2773         } else {
   2774             assert(mir->ssaRep->numUses == 4);
   2775             rlDest = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   2776             rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 2);
   2777             rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 3);
   2778         }
   2779     } else {
   2780         /* Two sources and 1 dest.  Deduce the operand sizes */
   2781         if (mir->ssaRep->numUses == 4) {
   2782             rlSrc1 = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   2783             rlSrc2 = dvmCompilerGetSrcWide(cUnit, mir, 2, 3);
   2784         } else {
   2785             assert(mir->ssaRep->numUses == 2);
   2786             rlSrc1 = dvmCompilerGetSrc(cUnit, mir, 0);
   2787             rlSrc2 = dvmCompilerGetSrc(cUnit, mir, 1);
   2788         }
   2789         if (mir->ssaRep->numDefs == 2) {
   2790             rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   2791         } else {
   2792             assert(mir->ssaRep->numDefs == 1);
   2793             rlDest = dvmCompilerGetDest(cUnit, mir, 0);
   2794         }
   2795     }
   2796 
   2797     switch (opcode) {
   2798         case OP_CMPL_FLOAT:
   2799         case OP_CMPG_FLOAT:
   2800         case OP_CMPL_DOUBLE:
   2801         case OP_CMPG_DOUBLE:
   2802             return genCmpFP(cUnit, mir, rlDest, rlSrc1, rlSrc2);
   2803         case OP_CMP_LONG:
   2804             genCmpLong(cUnit, mir, rlDest, rlSrc1, rlSrc2);
   2805             break;
   2806         case OP_AGET_WIDE:
   2807             genArrayGet(cUnit, mir, kLong, rlSrc1, rlSrc2, rlDest, 3);
   2808             break;
   2809         case OP_AGET:
   2810         case OP_AGET_OBJECT:
   2811             genArrayGet(cUnit, mir, kWord, rlSrc1, rlSrc2, rlDest, 2);
   2812             break;
   2813         case OP_AGET_BOOLEAN:
   2814             genArrayGet(cUnit, mir, kUnsignedByte, rlSrc1, rlSrc2, rlDest, 0);
   2815             break;
   2816         case OP_AGET_BYTE:
   2817             genArrayGet(cUnit, mir, kSignedByte, rlSrc1, rlSrc2, rlDest, 0);
   2818             break;
   2819         case OP_AGET_CHAR:
   2820             genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc1, rlSrc2, rlDest, 1);
   2821             break;
   2822         case OP_AGET_SHORT:
   2823             genArrayGet(cUnit, mir, kSignedHalf, rlSrc1, rlSrc2, rlDest, 1);
   2824             break;
   2825         case OP_APUT_WIDE:
   2826             genArrayPut(cUnit, mir, kLong, rlSrc1, rlSrc2, rlDest, 3);
   2827             break;
   2828         case OP_APUT:
   2829             genArrayPut(cUnit, mir, kWord, rlSrc1, rlSrc2, rlDest, 2);
   2830             break;
   2831         case OP_APUT_OBJECT:
   2832             genArrayObjectPut(cUnit, mir, rlSrc1, rlSrc2, rlDest, 2);
   2833             break;
   2834         case OP_APUT_SHORT:
   2835         case OP_APUT_CHAR:
   2836             genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc1, rlSrc2, rlDest, 1);
   2837             break;
   2838         case OP_APUT_BYTE:
   2839         case OP_APUT_BOOLEAN:
   2840             genArrayPut(cUnit, mir, kUnsignedByte, rlSrc1, rlSrc2, rlDest, 0);
   2841             break;
   2842         default:
   2843             return true;
   2844     }
   2845     return false;
   2846 }
   2847 
   2848 /*
   2849  * Find the matching case.
   2850  *
   2851  * return values:
    2852  * r_RESULT0 (low 32 bits): byte offset, relative to the return address, of the chaining cell
    2853  *    for the resolved case, including default, which is placed at MIN(size, MAX_CHAINED_SWITCH_CASES).
    2854  * r_RESULT1 (high 32 bits): the Dalvik branch offset of the matching case (only used for
    2855  *    indexes at or above MAX_CHAINED_SWITCH_CASES).
   2856  *
   2857  * Instructions around the call are:
   2858  *
   2859  * jalr &findPackedSwitchIndex
   2860  * nop
   2861  * lw gp, 84(sp) |
   2862  * addu          | 20 bytes for these 5 instructions
   2863  * move          | (NOTE: if this sequence is shortened or lengthened, then
   2864  * jr            |  the 20 byte offset added below in 3 places must be changed
   2865  * nop           |  accordingly.)
   2866  * chaining cell for case 0 [16 bytes]
   2867  * chaining cell for case 1 [16 bytes]
   2868  *               :
   2869  * chaining cell for case MIN(size, MAX_CHAINED_SWITCH_CASES)-1 [16 bytes]
   2870  * chaining cell for case default [16 bytes]
   2871  * noChain exit
   2872  */
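         /*
          * Worked example (illustrative, not from the original code): assuming
          * CHAIN_CELL_NORMAL_SIZE == 16, as the "[16 bytes]" annotations above
          * suggest, a testVal that resolves to chained case 2 returns
          * 2 * 16 + 20 = 52 in the low word, so adding it to ra (the addu in the
          * glue sequence above) skips the five glue instructions plus two chaining
          * cells and lands on the cell for case 2.
          */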
   2873 static u8 findPackedSwitchIndex(const u2* switchData, int testVal)
   2874 {
   2875     int size;
   2876     int firstKey;
   2877     const int *entries;
   2878     int index;
   2879     int jumpIndex;
   2880     uintptr_t caseDPCOffset = 0;
   2881 
   2882     /*
   2883      * Packed switch data format:
   2884      *  ushort ident = 0x0100   magic value
   2885      *  ushort size             number of entries in the table
   2886      *  int first_key           first (and lowest) switch case value
   2887      *  int targets[size]       branch targets, relative to switch opcode
   2888      *
   2889      * Total size is (4+size*2) 16-bit code units.
   2890      */
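             /*
              * Illustrative payload (example values only): a packed switch over
              * cases 10, 11, 12 is encoded as the ushort stream
              *   { 0x0100, 0x0003, 0x000a, 0x0000, t0.lo, t0.hi, t1.lo, t1.hi, t2.lo, t2.hi }
              * so size == 3, firstKey == 10, and testVal == 11 yields index 1.
              */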
   2891     size = switchData[1];
   2892     assert(size > 0);
   2893 
   2894     firstKey = switchData[2];
   2895     firstKey |= switchData[3] << 16;
   2896 
   2897 
   2898     /* The entries are guaranteed to be aligned on a 32-bit boundary;
   2899      * we can treat them as a native int array.
   2900      */
   2901     entries = (const int*) &switchData[4];
   2902     assert(((u4)entries & 0x3) == 0);
   2903 
   2904     index = testVal - firstKey;
   2905 
   2906     /* Jump to the default cell */
   2907     if (index < 0 || index >= size) {
   2908         jumpIndex = MIN(size, MAX_CHAINED_SWITCH_CASES);
   2909     /* Jump to the non-chaining exit point */
   2910     } else if (index >= MAX_CHAINED_SWITCH_CASES) {
   2911         jumpIndex = MAX_CHAINED_SWITCH_CASES + 1;
   2912 #ifdef HAVE_LITTLE_ENDIAN
   2913         caseDPCOffset = entries[index];
   2914 #else
   2915         caseDPCOffset = (unsigned int)entries[index] >> 16 | entries[index] << 16;
   2916 #endif
   2917     /* Jump to the inline chaining cell */
   2918     } else {
   2919         jumpIndex = index;
   2920     }
   2921 
   2922     return (((u8) caseDPCOffset) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
   2923 }
   2924 
   2925 /* See comments for findPackedSwitchIndex */
   2926 static u8 findSparseSwitchIndex(const u2* switchData, int testVal)
   2927 {
   2928     int size;
   2929     const int *keys;
   2930     const int *entries;
   2932     int i;
   2933 
   2934     /*
   2935      * Sparse switch data format:
   2936      *  ushort ident = 0x0200   magic value
   2937      *  ushort size             number of entries in the table; > 0
   2938      *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   2939      *  int targets[size]       branch targets, relative to switch opcode
   2940      *
   2941      * Total size is (2+size*4) 16-bit code units.
   2942      */
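             /*
              * Illustrative payload (example values only): a sparse switch with
              * keys {-4, 100} is encoded as ident = 0x0200, size = 2,
              * keys[2] = {-4, 100}, targets[2] = {t0, t1}; testVal == 100 matches
              * at i == 1, and any other value falls through to the default offset
              * computed at the end of this function.
              */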
   2943 
   2944     size = switchData[1];
   2945     assert(size > 0);
   2946 
   2947     /* The keys are guaranteed to be aligned on a 32-bit boundary;
   2948      * we can treat them as a native int array.
   2949      */
   2950     keys = (const int*) &switchData[2];
   2951     assert(((u4)keys & 0x3) == 0);
   2952 
   2953     /* The entries are guaranteed to be aligned on a 32-bit boundary;
   2954      * we can treat them as a native int array.
   2955      */
   2956     entries = keys + size;
   2957     assert(((u4)entries & 0x3) == 0);
   2958 
   2959     /*
   2960      * Run through the list of keys, which are guaranteed to
   2961      * be sorted low-to-high.
   2962      *
   2963      * Most tables have 3-4 entries.  Few have more than 10.  A binary
   2964      * search here is probably not useful.
   2965      */
   2966     for (i = 0; i < size; i++) {
   2967 #ifdef HAVE_LITTLE_ENDIAN
   2968         int k = keys[i];
   2969         if (k == testVal) {
   2970             /* MAX_CHAINED_SWITCH_CASES + 1 is the start of the overflow case */
   2971             int jumpIndex = (i < MAX_CHAINED_SWITCH_CASES) ?
   2972                            i : MAX_CHAINED_SWITCH_CASES + 1;
   2973             return (((u8) entries[i]) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
   2974 #else
   2975         int k = (unsigned int)keys[i] >> 16 | keys[i] << 16;
   2976         if (k == testVal) {
   2977             /* MAX_CHAINED_SWITCH_CASES + 1 is the start of the overflow case */
   2978             int jumpIndex = (i < MAX_CHAINED_SWITCH_CASES) ?
   2979                            i : MAX_CHAINED_SWITCH_CASES + 1;
   2980             int temp = (unsigned int)entries[i] >> 16 | entries[i] << 16;
   2981             return (((u8) temp) << 32) | (u8) (jumpIndex * CHAIN_CELL_NORMAL_SIZE + 20);
   2982 #endif
   2983         } else if (k > testVal) {
   2984             break;
   2985         }
   2986     }
   2987     return MIN(size, MAX_CHAINED_SWITCH_CASES) * CHAIN_CELL_NORMAL_SIZE + 20;
   2988 }
   2989 
   2990 static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir)
   2991 {
   2992     Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   2993     switch (dalvikOpcode) {
   2994         case OP_FILL_ARRAY_DATA: {
   2995             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   2996             // Making a call - use explicit registers
   2997             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   2998             genExportPC(cUnit, mir);
   2999             loadValueDirectFixed(cUnit, rlSrc, r_A0);
   3000             LOAD_FUNC_ADDR(cUnit, r_T9, (int)dvmInterpHandleFillArrayData);
   3001             loadConstant(cUnit, r_A1,
   3002                (int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
   3003             opReg(cUnit, kOpBlx, r_T9);
   3004             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   3005             dvmCompilerClobberCallRegs(cUnit);
   3006             /* generate a branch over if successful */
   3007             MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   3008             loadConstant(cUnit, r_A0,
   3009                          (int) (cUnit->method->insns + mir->offset));
   3010             genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   3011             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   3012             target->defMask = ENCODE_ALL;
   3013             branchOver->generic.target = (LIR *) target;
   3014             break;
   3015         }
   3016         /*
   3017          * Compute the goto target of up to
   3018          * MIN(switchSize, MAX_CHAINED_SWITCH_CASES) + 1 chaining cells.
   3019          * See the comment before findPackedSwitchIndex for the code layout.
   3020          */
   3021         case OP_PACKED_SWITCH:
   3022         case OP_SPARSE_SWITCH: {
   3023             RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   3024             dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   3025             loadValueDirectFixed(cUnit, rlSrc, r_A1);
   3026             dvmCompilerLockAllTemps(cUnit);
   3027 
   3028             if (dalvikOpcode == OP_PACKED_SWITCH) {
   3029                 LOAD_FUNC_ADDR(cUnit, r_T9, (int)findPackedSwitchIndex);
   3030             } else {
   3031                 LOAD_FUNC_ADDR(cUnit, r_T9, (int)findSparseSwitchIndex);
   3032             }
   3033             /* r_A0 <- Addr of the switch data */
   3034             loadConstant(cUnit, r_A0,
   3035                (int) (cUnit->method->insns + mir->offset + mir->dalvikInsn.vB));
   3036             opReg(cUnit, kOpBlx, r_T9);
   3037             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   3038             dvmCompilerClobberCallRegs(cUnit);
   3039             /* pc <- computed goto target using value in RA */
   3040             newLIR3(cUnit, kMipsAddu, r_A0, r_RA, r_RESULT0);
   3041             newLIR2(cUnit, kMipsMove, r_A1, r_RESULT1);
   3042             newLIR1(cUnit, kMipsJr, r_A0);
   3043             newLIR0(cUnit, kMipsNop); /* for maintaining 20 byte offset */
   3044             break;
   3045         }
   3046         default:
   3047             return true;
   3048     }
   3049     return false;
   3050 }
   3051 
   3052 /*
   3053  * See the example of predicted inlining listed before the
    3054  * genValidationForPredictedInline function. The function here takes care of
    3055  * the branch over at 0x4858de78 and the misprediction target at 0x4858de7a.
   3056  */
   3057 static void genLandingPadForMispredictedCallee(CompilationUnit *cUnit, MIR *mir,
   3058                                                BasicBlock *bb,
   3059                                                MipsLIR *labelList)
   3060 {
   3061     BasicBlock *fallThrough = bb->fallThrough;
   3062 
   3063     /* Bypass the move-result block if there is one */
   3064     if (fallThrough->firstMIRInsn) {
   3065         assert(fallThrough->firstMIRInsn->OptimizationFlags & MIR_INLINED_PRED);
   3066         fallThrough = fallThrough->fallThrough;
   3067     }
   3068     /* Generate a branch over if the predicted inlining is correct */
   3069     genUnconditionalBranch(cUnit, &labelList[fallThrough->id]);
   3070 
   3071     /* Reset the register state */
   3072     dvmCompilerResetRegPool(cUnit);
   3073     dvmCompilerClobberAllRegs(cUnit);
   3074     dvmCompilerResetNullCheck(cUnit);
   3075 
   3076     /* Target for the slow invoke path */
   3077     MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   3078     target->defMask = ENCODE_ALL;
   3079     /* Hook up the target to the verification branch */
   3080     mir->meta.callsiteInfo->misPredBranchOver->target = (LIR *) target;
   3081 }
   3082 
   3083 static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir,
   3084                              BasicBlock *bb, MipsLIR *labelList)
   3085 {
   3086     MipsLIR *retChainingCell = NULL;
   3087     MipsLIR *pcrLabel = NULL;
   3088 
    3089     /* An invoke with the MIR_INLINED flag set is effectively a no-op */
   3090     if (mir->OptimizationFlags & MIR_INLINED)
   3091         return false;
   3092 
   3093     if (bb->fallThrough != NULL)
   3094         retChainingCell = &labelList[bb->fallThrough->id];
   3095 
   3096     DecodedInstruction *dInsn = &mir->dalvikInsn;
   3097     switch (mir->dalvikInsn.opcode) {
   3098         /*
   3099          * calleeMethod = this->clazz->vtable[
   3100          *     method->clazz->pDvmDex->pResMethods[BBBB]->methodIndex
   3101          * ]
   3102          */
   3103         case OP_INVOKE_VIRTUAL:
   3104         case OP_INVOKE_VIRTUAL_RANGE: {
   3105             MipsLIR *predChainingCell = &labelList[bb->taken->id];
   3106             int methodIndex =
   3107                 cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]->
   3108                 methodIndex;
   3109 
   3110             /*
   3111              * If the invoke has non-null misPredBranchOver, we need to generate
   3112              * the non-inlined version of the invoke here to handle the
   3113              * mispredicted case.
   3114              */
   3115             if (mir->meta.callsiteInfo->misPredBranchOver) {
   3116                 genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
   3117             }
   3118 
   3119             if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
   3120                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3121             else
   3122                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3123 
   3124             genInvokeVirtualCommon(cUnit, mir, methodIndex,
   3125                                    retChainingCell,
   3126                                    predChainingCell,
   3127                                    pcrLabel);
   3128             break;
   3129         }
   3130         /*
   3131          * calleeMethod = method->clazz->super->vtable[method->clazz->pDvmDex
   3132          *                ->pResMethods[BBBB]->methodIndex]
   3133          */
   3134         case OP_INVOKE_SUPER:
   3135         case OP_INVOKE_SUPER_RANGE: {
   3136             /* Grab the method ptr directly from what the interpreter sees */
   3137             const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3138             assert(calleeMethod == cUnit->method->clazz->super->vtable[
   3139                                      cUnit->method->clazz->pDvmDex->
   3140                                        pResMethods[dInsn->vB]->methodIndex]);
   3141 
   3142             if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
   3143                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3144             else
   3145                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3146 
   3147             if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
   3148                 const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3149                 void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
   3150                 assert(calleeAddr);
   3151                 genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
   3152                                               retChainingCell);
   3153             } else {
   3154                 /* r_A0 = calleeMethod */
   3155                 loadConstant(cUnit, r_A0, (int) calleeMethod);
   3156 
   3157                 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
   3158                                          calleeMethod);
   3159             }
   3160             break;
   3161         }
   3162         /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
   3163         case OP_INVOKE_DIRECT:
   3164         case OP_INVOKE_DIRECT_RANGE: {
   3165             /* Grab the method ptr directly from what the interpreter sees */
   3166             const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3167             assert(calleeMethod ==
   3168                    cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]);
   3169 
   3170             if (mir->dalvikInsn.opcode == OP_INVOKE_DIRECT)
   3171                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3172             else
   3173                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3174 
   3175             /* r_A0 = calleeMethod */
   3176             loadConstant(cUnit, r_A0, (int) calleeMethod);
   3177 
   3178             genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
   3179                                      calleeMethod);
   3180             break;
   3181         }
   3182         /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
   3183         case OP_INVOKE_STATIC:
   3184         case OP_INVOKE_STATIC_RANGE: {
   3185             /* Grab the method ptr directly from what the interpreter sees */
   3186             const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3187             assert(calleeMethod ==
   3188                    cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]);
   3189 
   3190             if (mir->dalvikInsn.opcode == OP_INVOKE_STATIC)
   3191                 genProcessArgsNoRange(cUnit, mir, dInsn,
   3192                                       NULL /* no null check */);
   3193             else
   3194                 genProcessArgsRange(cUnit, mir, dInsn,
   3195                                     NULL /* no null check */);
   3196 
   3197             if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
   3198                 const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3199                 void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
   3200                 assert(calleeAddr);
   3201                 genInvokeSingletonWholeMethod(cUnit, mir, calleeAddr,
   3202                                               retChainingCell);
   3203             } else {
   3204                 /* r_A0 = calleeMethod */
   3205                 loadConstant(cUnit, r_A0, (int) calleeMethod);
   3206 
   3207                 genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
   3208                                          calleeMethod);
   3209             }
   3210             break;
   3211         }
   3212 
   3213         /*
   3214          * calleeMethod = dvmFindInterfaceMethodInCache(this->clazz,
   3215          *                    BBBB, method, method->clazz->pDvmDex)
   3216          *
   3217          * The following is an example of generated code for
   3218          *      "invoke-interface v0"
   3219          *
   3220          * -------- dalvik offset: 0x000f @ invoke-interface (PI) v2
   3221          * 0x2f140c54 : lw       a0,8(s1)                    # genProcessArgsNoRange
   3222          * 0x2f140c58 : addiu    s4,s1,0xffffffe8(-24)
   3223          * 0x2f140c5c : beqz     a0,0x2f140d5c (L0x11f864)
   3224          * 0x2f140c60 : pref     1,0(s4)
   3225          * -------- BARRIER
   3226          * 0x2f140c64 : sw       a0,0(s4)
   3227          * 0x2f140c68 : addiu    s4,s4,0x0004(4)
   3228          * -------- BARRIER
   3229          * 0x2f140c6c : lui      s0,0x2d23(11555)            # dalvikPC
   3230          * 0x2f140c70 : ori      s0,s0,0x2d2365a6(757294502)
   3231          * 0x2f140c74 : lahi/lui a1,0x2f14(12052)            # a1 <- &retChainingCell
   3232          * 0x2f140c78 : lalo/ori a1,a1,0x2f140d38(789843256)
   3233          * 0x2f140c7c : lahi/lui a2,0x2f14(12052)            # a2 <- &predictedChainingCell
   3234          * 0x2f140c80 : lalo/ori a2,a2,0x2f140d80(789843328)
   3235          * 0x2f140c84 : jal      0x2f1311ec(789778924)       # call TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
   3236          * 0x2f140c88 : nop
   3237          * 0x2f140c8c : b        0x2f140d80 (L0x11efc0)      # off to the predicted chain
   3238          * 0x2f140c90 : nop
   3239          * 0x2f140c94 : b        0x2f140d60 (L0x12457c)      # punt to the interpreter
   3240          * 0x2f140c98 : lui      a0,0x2d23(11555)
   3241          * 0x2f140c9c : move     s5,a1                       # prepare for dvmFindInterfaceMethodInCache
   3242          * 0x2f140ca0 : move     s6,a2
   3243          * 0x2f140ca4 : move     s7,a3
   3244          * 0x2f140ca8 : move     a0,a3
   3245          * 0x2f140cac : ori      a1,zero,0x2b42(11074)
   3246          * 0x2f140cb0 : lui      a2,0x2c92(11410)
   3247          * 0x2f140cb4 : ori      a2,a2,0x2c92adf8(747810296)
   3248          * 0x2f140cb8 : lui      a3,0x0009(9)
   3249          * 0x2f140cbc : ori      a3,a3,0x924b8(599224)
   3250          * 0x2f140cc0 : lui      t9,0x2ab2(10930)
   3251          * 0x2f140cc4 : ori      t9,t9,0x2ab2a48c(716350604)
   3252          * 0x2f140cc8 : jalr     ra,t9                       # call dvmFindInterfaceMethodInCache
   3253          * 0x2f140ccc : nop
   3254          * 0x2f140cd0 : lw       gp,84(sp)
   3255          * 0x2f140cd4 : move     a0,v0
   3256          * 0x2f140cd8 : bne      v0,zero,0x2f140cf0 (L0x120064)
   3257          * 0x2f140cdc : nop
   3258          * 0x2f140ce0 : lui      a0,0x2d23(11555)            # a0 <- dalvikPC
   3259          * 0x2f140ce4 : ori      a0,a0,0x2d2365a6(757294502)
   3260          * 0x2f140ce8 : jal      0x2f131720(789780256)       # call TEMPLATE_THROW_EXCEPTION_COMMON
   3261          * 0x2f140cec : nop
   3262          * 0x2f140cf0 : move     a1,s5                       # a1 <- &retChainingCell
   3263          * 0x2f140cf4 : bgtz     s5,0x2f140d20 (L0x120324)   # >0? don't rechain
   3264          * 0x2f140cf8 : nop
   3265          * 0x2f140cfc : lui      t9,0x2aba(10938)            # prepare for dvmJitToPatchPredictedChain
   3266          * 0x2f140d00 : ori      t9,t9,0x2abae3c4(716891076)
   3267          * 0x2f140d04 : move     a1,s2
   3268          * 0x2f140d08 : move     a2,s6
   3269          * 0x2f140d0c : move     a3,s7
   3270          * 0x2f140d10 : jalr     ra,t9                       # call dvmJitToPatchPredictedChain
   3271          * 0x2f140d14 : nop
   3272          * 0x2f140d18 : lw       gp,84(sp)
   3273          * 0x2f140d1c : move     a0,v0
   3274          * 0x2f140d20 : lahi/lui a1,0x2f14(12052)
   3275          * 0x2f140d24 : lalo/ori a1,a1,0x2f140d38(789843256) # a1 <- &retChainingCell
   3276          * 0x2f140d28 : jal      0x2f1310c4(789778628)       # call TEMPLATE_INVOKE_METHOD_NO_OPT
   3277          * 0x2f140d2c : nop
   3278          * 0x2f140d30 : b        0x2f140d60 (L0x12457c)
   3279          * 0x2f140d34 : lui      a0,0x2d23(11555)
   3280          * 0x2f140d38 : .align4
   3281          * -------- dalvik offset: 0x0012 @ move-result (PI) v1, (#0), (#0)
   3282          * 0x2f140d38 : lw       a2,16(s2)
   3283          * 0x2f140d3c : sw       a2,4(s1)
   3284          * 0x2f140d40 : b        0x2f140d74 (L0x1246fc)
   3285          * 0x2f140d44 : lw       a0,116(s2)
   3286          * 0x2f140d48 : undefined
   3287          * -------- reconstruct dalvik PC : 0x2d2365a6 @ +0x000f
   3288          * 0x2f140d4c : lui      a0,0x2d23(11555)
   3289          * 0x2f140d50 : ori      a0,a0,0x2d2365a6(757294502)
   3290          * 0x2f140d54 : b        0x2f140d68 (L0x12463c)
   3291          * 0x2f140d58 : lw       a1,108(s2)
   3292          * -------- reconstruct dalvik PC : 0x2d2365a6 @ +0x000f
   3293          * 0x2f140d5c : lui      a0,0x2d23(11555)
   3294          * 0x2f140d60 : ori      a0,a0,0x2d2365a6(757294502)
   3295          * Exception_Handling:
   3296          * 0x2f140d64 : lw       a1,108(s2)
   3297          * 0x2f140d68 : jalr     ra,a1
   3298          * 0x2f140d6c : nop
   3299          * 0x2f140d70 : .align4
   3300          * -------- chaining cell (hot): 0x0013
   3301          * 0x2f140d70 : lw       a0,116(s2)
   3302          * 0x2f140d74 : jalr     ra,a0
   3303          * 0x2f140d78 : nop
   3304          * 0x2f140d7c : data     0x2d2365ae(757294510)
   3305          * 0x2f140d80 : .align4
   3306          * -------- chaining cell (predicted): N/A
   3307          * 0x2f140d80 : data     0xe7fe(59390)
   3308          * 0x2f140d84 : data     0x0000(0)
   3309          * 0x2f140d88 : data     0x0000(0)
   3310          * 0x2f140d8c : data     0x0000(0)
   3311          * 0x2f140d90 : data     0x0000(0)
   3312          * -------- end of chaining cells (0x0190)
   3313          */
   3314         case OP_INVOKE_INTERFACE:
   3315         case OP_INVOKE_INTERFACE_RANGE: {
   3316             MipsLIR *predChainingCell = &labelList[bb->taken->id];
   3317 
   3318             /*
   3319              * If the invoke has non-null misPredBranchOver, we need to generate
   3320              * the non-inlined version of the invoke here to handle the
   3321              * mispredicted case.
   3322              */
   3323             if (mir->meta.callsiteInfo->misPredBranchOver) {
   3324                 genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
   3325             }
   3326 
   3327             if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
   3328                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3329             else
   3330                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3331 
   3332             /* "this" is already left in r_A0 by genProcessArgs* */
   3333 
   3334             /* r4PC = dalvikCallsite */
   3335             loadConstant(cUnit, r4PC,
   3336                          (int) (cUnit->method->insns + mir->offset));
   3337 
   3338             /* r_A1 = &retChainingCell */
   3339             MipsLIR *addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
   3340             addrRetChain->generic.target = (LIR *) retChainingCell;
   3341             addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
   3342             addrRetChain->generic.target = (LIR *) retChainingCell;
   3343 
   3344 
   3345             /* r_A2 = &predictedChainingCell */
   3346             MipsLIR *predictedChainingCell = newLIR2(cUnit, kMipsLahi, r_A2, 0);
   3347             predictedChainingCell->generic.target = (LIR *) predChainingCell;
   3348             predictedChainingCell = newLIR3(cUnit, kMipsLalo, r_A2, r_A2, 0);
   3349             predictedChainingCell->generic.target = (LIR *) predChainingCell;
   3350 
   3351             genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   3352                 TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
   3353                 TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
   3354 
   3355             /* return through ra - jump to the chaining cell */
   3356             genUnconditionalBranch(cUnit, predChainingCell);
   3357 
   3358             /*
   3359              * null-check on "this" may have been eliminated, but we still need
   3360              * a PC-reconstruction label for stack overflow bailout.
   3361              */
   3362             if (pcrLabel == NULL) {
   3363                 int dPC = (int) (cUnit->method->insns + mir->offset);
   3364                 pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   3365                 pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
   3366                 pcrLabel->operands[0] = dPC;
   3367                 pcrLabel->operands[1] = mir->offset;
   3368                 /* Insert the place holder to the growable list */
   3369                 dvmInsertGrowableList(&cUnit->pcReconstructionList,
   3370                                       (intptr_t) pcrLabel);
   3371             }
   3372 
   3373             /* return through ra+8 - punt to the interpreter */
   3374             genUnconditionalBranch(cUnit, pcrLabel);
   3375 
   3376             /*
   3377              * return through ra+16 - fully resolve the callee method.
   3378              * r_A1 <- count
   3379              * r_A2 <- &predictedChainCell
   3380              * r_A3 <- this->class
   3381              * r4 <- dPC
   3382              * r_S4 <- this->class->vtable
   3383              */
   3384 
   3385             /* Save count, &predictedChainCell, and class to high regs first */
   3386             genRegCopy(cUnit, r_S5, r_A1);
   3387             genRegCopy(cUnit, r_S6, r_A2);
   3388             genRegCopy(cUnit, r_S7, r_A3);
   3389 
   3390             /* r_A0 now contains this->clazz */
   3391             genRegCopy(cUnit, r_A0, r_A3);
   3392 
   3393             /* r_A1 = BBBB */
   3394             loadConstant(cUnit, r_A1, dInsn->vB);
   3395 
   3396             /* r_A2 = method (caller) */
   3397             loadConstant(cUnit, r_A2, (int) cUnit->method);
   3398 
   3399             /* r_A3 = pDvmDex */
   3400             loadConstant(cUnit, r_A3, (int) cUnit->method->clazz->pDvmDex);
   3401 
   3402             LOAD_FUNC_ADDR(cUnit, r_T9,
   3403                            (intptr_t) dvmFindInterfaceMethodInCache);
   3404             opReg(cUnit, kOpBlx, r_T9);
   3405             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
    3406             /* r_V0 = calleeMethod (returned from dvmFindInterfaceMethodInCache) */
   3407             genRegCopy(cUnit, r_A0, r_V0);
   3408 
   3409             dvmCompilerClobberCallRegs(cUnit);
   3410             /* generate a branch over if the interface method is resolved */
   3411             MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   3412             /*
   3413              * calleeMethod == NULL -> throw
   3414              */
   3415             loadConstant(cUnit, r_A0,
   3416                          (int) (cUnit->method->insns + mir->offset));
   3417             genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   3418             /* noreturn */
   3419 
   3420             MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   3421             target->defMask = ENCODE_ALL;
   3422             branchOver->generic.target = (LIR *) target;
   3423 
   3424             genRegCopy(cUnit, r_A1, r_S5);
   3425 
   3426             /* Check if rechain limit is reached */
   3427             MipsLIR *bypassRechaining = opCompareBranch(cUnit, kMipsBgtz, r_S5, -1);
   3428 
   3429             LOAD_FUNC_ADDR(cUnit, r_T9, (int) dvmJitToPatchPredictedChain);
   3430 
   3431             genRegCopy(cUnit, r_A1, rSELF);
   3432             genRegCopy(cUnit, r_A2, r_S6);
   3433             genRegCopy(cUnit, r_A3, r_S7);
   3434 
   3435             /*
   3436              * r_A0 = calleeMethod
   3437              * r_A2 = &predictedChainingCell
   3438              * r_A3 = class
   3439              *
   3440              * &returnChainingCell has been loaded into r_A1 but is not needed
   3441              * when patching the chaining cell and will be clobbered upon
   3442              * returning so it will be reconstructed again.
   3443              */
   3444             opReg(cUnit, kOpBlx, r_T9);
   3445             newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   3446             genRegCopy(cUnit, r_A0, r_V0);
   3447 
   3448             /* r_A1 = &retChainingCell */
   3449             addrRetChain = newLIR2(cUnit, kMipsLahi, r_A1, 0);
   3450             addrRetChain->generic.target = (LIR *) retChainingCell;
   3451             bypassRechaining->generic.target = (LIR *) addrRetChain;
   3452             addrRetChain = newLIR3(cUnit, kMipsLalo, r_A1, r_A1, 0);
   3453             addrRetChain->generic.target = (LIR *) retChainingCell;
   3454 
   3455 
   3456             /*
    3457              * r_A0 = calleeMethod,
    3458              * r_A1 = &retChainingCell,
    3459              * r4PC = callsiteDPC,
   3460              */
   3461             genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
   3462                 TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
   3463                 TEMPLATE_INVOKE_METHOD_NO_OPT);
   3464 
   3465 #if defined(WITH_JIT_TUNING)
   3466             gDvmJit.invokePolymorphic++;
   3467 #endif
   3468             /* Handle exceptions using the interpreter */
   3469             genTrap(cUnit, mir->offset, pcrLabel);
   3470             break;
   3471         }
   3472         case OP_INVOKE_OBJECT_INIT_RANGE:
   3473         case OP_FILLED_NEW_ARRAY:
   3474         case OP_FILLED_NEW_ARRAY_RANGE: {
   3475             /* Just let the interpreter deal with these */
   3476             genInterpSingleStep(cUnit, mir);
   3477             break;
   3478         }
   3479         default:
   3480             return true;
   3481     }
   3482     return false;
   3483 }
   3484 
   3485 static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
   3486                                BasicBlock *bb, MipsLIR *labelList)
   3487 {
   3488     MipsLIR *pcrLabel = NULL;
   3489 
    3490     /* An invoke with the MIR_INLINED flag set is effectively a no-op */
   3491     if (mir->OptimizationFlags & MIR_INLINED)
   3492         return false;
   3493 
   3494     DecodedInstruction *dInsn = &mir->dalvikInsn;
   3495     switch (mir->dalvikInsn.opcode) {
   3496         /* calleeMethod = this->clazz->vtable[BBBB] */
   3497         case OP_INVOKE_VIRTUAL_QUICK_RANGE:
   3498         case OP_INVOKE_VIRTUAL_QUICK: {
   3499             int methodIndex = dInsn->vB;
   3500             MipsLIR *retChainingCell = &labelList[bb->fallThrough->id];
   3501             MipsLIR *predChainingCell = &labelList[bb->taken->id];
   3502 
   3503             /*
   3504              * If the invoke has non-null misPredBranchOver, we need to generate
   3505              * the non-inlined version of the invoke here to handle the
   3506              * mispredicted case.
   3507              */
   3508             if (mir->meta.callsiteInfo->misPredBranchOver) {
   3509                 genLandingPadForMispredictedCallee(cUnit, mir, bb, labelList);
   3510             }
   3511 
   3512             if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL_QUICK)
   3513                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3514             else
   3515                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3516 
   3517             if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
   3518                 const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3519                 void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
   3520                 assert(calleeAddr);
   3521                 genInvokeVirtualWholeMethod(cUnit, mir, calleeAddr,
   3522                                             retChainingCell);
   3523             }
   3524 
   3525             genInvokeVirtualCommon(cUnit, mir, methodIndex,
   3526                                    retChainingCell,
   3527                                    predChainingCell,
   3528                                    pcrLabel);
   3529             break;
   3530         }
   3531         /* calleeMethod = method->clazz->super->vtable[BBBB] */
   3532         case OP_INVOKE_SUPER_QUICK:
   3533         case OP_INVOKE_SUPER_QUICK_RANGE: {
   3534             /* Grab the method ptr directly from what the interpreter sees */
   3535             const Method *calleeMethod = mir->meta.callsiteInfo->method;
   3536             assert(calleeMethod ==
   3537                    cUnit->method->clazz->super->vtable[dInsn->vB]);
   3538 
   3539             if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER_QUICK)
   3540                 genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel);
   3541             else
   3542                 genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
   3543 
   3544             /* r_A0 = calleeMethod */
   3545             loadConstant(cUnit, r_A0, (int) calleeMethod);
   3546 
   3547             genInvokeSingletonCommon(cUnit, mir, bb, labelList, pcrLabel,
   3548                                      calleeMethod);
   3549             break;
   3550         }
   3551         default:
   3552             return true;
   3553     }
   3554     return false;
   3555 }
   3556 
   3557 /*
   3558  * This operation is complex enough that we'll do it partly inline
   3559  * and partly with a handler.  NOTE: the handler uses hardcoded
    3560  * values for string object offsets and must be revisited if the
   3561  * layout changes.
   3562  */
   3563 static bool genInlinedCompareTo(CompilationUnit *cUnit, MIR *mir)
   3564 {
   3565 #if defined(USE_GLOBAL_STRING_DEFS)
   3566     return handleExecuteInlineC(cUnit, mir);
   3567 #else
   3568     MipsLIR *rollback;
   3569     RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
   3570     RegLocation rlComp = dvmCompilerGetSrc(cUnit, mir, 1);
   3571 
   3572     loadValueDirectFixed(cUnit, rlThis, r_A0);
   3573     loadValueDirectFixed(cUnit, rlComp, r_A1);
   3574     /* Test objects for NULL */
   3575     rollback = genNullCheck(cUnit, rlThis.sRegLow, r_A0, mir->offset, NULL);
   3576     genNullCheck(cUnit, rlComp.sRegLow, r_A1, mir->offset, rollback);
   3577     /*
   3578      * TUNING: we could check for object pointer equality before invoking
   3579      * handler. Unclear whether the gain would be worth the added code size
   3580      * expansion.
   3581      */
   3582     genDispatchToHandler(cUnit, TEMPLATE_STRING_COMPARETO);
   3583     storeValue(cUnit, inlinedTarget(cUnit, mir, false),
   3584                dvmCompilerGetReturn(cUnit));
   3585     return false;
   3586 #endif
   3587 }
   3588 
   3589 static bool genInlinedFastIndexOf(CompilationUnit *cUnit, MIR *mir)
   3590 {
   3591 #if defined(USE_GLOBAL_STRING_DEFS)
   3592     return handleExecuteInlineC(cUnit, mir);
   3593 #else
   3594     RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
   3595     RegLocation rlChar = dvmCompilerGetSrc(cUnit, mir, 1);
   3596 
   3597     loadValueDirectFixed(cUnit, rlThis, r_A0);
   3598     loadValueDirectFixed(cUnit, rlChar, r_A1);
   3599 
   3600     RegLocation rlStart = dvmCompilerGetSrc(cUnit, mir, 2);
   3601     loadValueDirectFixed(cUnit, rlStart, r_A2);
   3602 
   3603     /* Test objects for NULL */
   3604     genNullCheck(cUnit, rlThis.sRegLow, r_A0, mir->offset, NULL);
   3605     genDispatchToHandler(cUnit, TEMPLATE_STRING_INDEXOF);
   3606     storeValue(cUnit, inlinedTarget(cUnit, mir, false),
   3607                dvmCompilerGetReturn(cUnit));
   3608     return false;
   3609 #endif
   3610 }
   3611 
   3612 // Generates an inlined String.isEmpty or String.length.
   3613 static bool genInlinedStringIsEmptyOrLength(CompilationUnit *cUnit, MIR *mir,
   3614                                             bool isEmpty)
   3615 {
   3616     // dst = src.length();
   3617     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
   3618     RegLocation rlDest = inlinedTarget(cUnit, mir, false);
   3619     rlObj = loadValue(cUnit, rlObj, kCoreReg);
   3620     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   3621     genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir->offset, NULL);
   3622     loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_count,
   3623                  rlResult.lowReg);
   3624     if (isEmpty) {
   3625         // dst = (dst == 0);
   3626         int tReg = dvmCompilerAllocTemp(cUnit);
   3627         newLIR3(cUnit, kMipsSltu, tReg, r_ZERO, rlResult.lowReg);
   3628         opRegRegImm(cUnit, kOpXor, rlResult.lowReg, tReg, 1);
   3629     }
   3630     storeValue(cUnit, rlDest, rlResult);
   3631     return false;
   3632 }
   3633 
   3634 static bool genInlinedStringLength(CompilationUnit *cUnit, MIR *mir)
   3635 {
   3636     return genInlinedStringIsEmptyOrLength(cUnit, mir, false);
   3637 }
   3638 
   3639 static bool genInlinedStringIsEmpty(CompilationUnit *cUnit, MIR *mir)
   3640 {
   3641     return genInlinedStringIsEmptyOrLength(cUnit, mir, true);
   3642 }
   3643 
   3644 static bool genInlinedStringCharAt(CompilationUnit *cUnit, MIR *mir)
   3645 {
   3646     int contents = OFFSETOF_MEMBER(ArrayObject, contents);
   3647     RegLocation rlObj = dvmCompilerGetSrc(cUnit, mir, 0);
   3648     RegLocation rlIdx = dvmCompilerGetSrc(cUnit, mir, 1);
   3649     RegLocation rlDest = inlinedTarget(cUnit, mir, false);
   3650     RegLocation rlResult;
   3651     rlObj = loadValue(cUnit, rlObj, kCoreReg);
   3652     rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
   3653     int regMax = dvmCompilerAllocTemp(cUnit);
   3654     int regOff = dvmCompilerAllocTemp(cUnit);
   3655     int regPtr = dvmCompilerAllocTemp(cUnit);
   3656     MipsLIR *pcrLabel = genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg,
   3657                                     mir->offset, NULL);
   3658     loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_count, regMax);
   3659     loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_offset, regOff);
   3660     loadWordDisp(cUnit, rlObj.lowReg, gDvm.offJavaLangString_value, regPtr);
   3661     genBoundsCheck(cUnit, rlIdx.lowReg, regMax, mir->offset, pcrLabel);
   3662     dvmCompilerFreeTemp(cUnit, regMax);
   3663     opRegImm(cUnit, kOpAdd, regPtr, contents);
   3664     opRegReg(cUnit, kOpAdd, regOff, rlIdx.lowReg);
   3665     rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   3666     loadBaseIndexed(cUnit, regPtr, regOff, rlResult.lowReg, 1, kUnsignedHalf);
   3667     storeValue(cUnit, rlDest, rlResult);
   3668     return false;
   3669 }
   3670 
   3671 static bool genInlinedAbsInt(CompilationUnit *cUnit, MIR *mir)
   3672 {
   3673     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   3674     rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
   3675     RegLocation rlDest = inlinedTarget(cUnit, mir, false);
   3676     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   3677     int signReg = dvmCompilerAllocTemp(cUnit);
   3678     /*
    3679      * abs(x): let y = x >> 31 (arithmetic shift), then abs(x) = (x + y) ^ y.
   3680      * Thumb2's IT block also yields 3 instructions, but imposes
   3681      * scheduling constraints.
   3682      */
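             /*
              * Worked example (illustrative): for x = -5, signReg = x >> 31 = -1
              * (all ones), x + signReg = -6, and -6 ^ -1 = 5; for x >= 0 the sign
              * mask is 0 and x passes through unchanged.
              */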
   3683     opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.lowReg, 31);
   3684     opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
   3685     opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
   3686     storeValue(cUnit, rlDest, rlResult);
   3687     return false;
   3688 }
   3689 
   3690 static bool genInlinedAbsLong(CompilationUnit *cUnit, MIR *mir)
   3691 {
   3692     RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   3693     RegLocation rlDest = inlinedTargetWide(cUnit, mir, false);
   3694     rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
   3695     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   3696     int signReg = dvmCompilerAllocTemp(cUnit);
   3697     int tReg = dvmCompilerAllocTemp(cUnit);
   3698     /*
    3699      * abs(x): let y = x >> 31 (arithmetic shift), then abs(x) = (x + y) ^ y.
   3700      * Thumb2 IT block allows slightly shorter sequence,
   3701      * but introduces a scheduling barrier.  Stick with this
   3702      * mechanism for now.
   3703      */
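             /*
              * Illustrative note: the same identity is applied to the 64-bit pair
              * below; signReg holds the sign of the high word, the sltu recovers
              * the carry out of the low-word add so it can be folded into the high
              * word, and both halves are xor'ed with the sign mask at the end.
              */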
   3704     opRegRegImm(cUnit, kOpAsr, signReg, rlSrc.highReg, 31);
   3705     opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, signReg);
   3706     newLIR3(cUnit, kMipsSltu, tReg, rlResult.lowReg, signReg);
   3707     opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, signReg);
   3708     opRegRegReg(cUnit, kOpAdd, rlResult.highReg, rlResult.highReg, tReg);
   3709     opRegReg(cUnit, kOpXor, rlResult.lowReg, signReg);
   3710     opRegReg(cUnit, kOpXor, rlResult.highReg, signReg);
   3711     dvmCompilerFreeTemp(cUnit, signReg);
   3712     dvmCompilerFreeTemp(cUnit, tReg);
   3713     storeValueWide(cUnit, rlDest, rlResult);
   3714     return false;
   3715 }
   3716 
   3717 static bool genInlinedIntFloatConversion(CompilationUnit *cUnit, MIR *mir)
   3718 {
   3719     // Just move from source to destination...
   3720     RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
   3721     RegLocation rlDest = inlinedTarget(cUnit, mir, false);
   3722     storeValue(cUnit, rlDest, rlSrc);
   3723     return false;
   3724 }
   3725 
   3726 static bool genInlinedLongDoubleConversion(CompilationUnit *cUnit, MIR *mir)
   3727 {
   3728     // Just move from source to destination...
   3729     RegLocation rlSrc = dvmCompilerGetSrcWide(cUnit, mir, 0, 1);
   3730     RegLocation rlDest = inlinedTargetWide(cUnit, mir, false);
   3731     storeValueWide(cUnit, rlDest, rlSrc);
   3732     return false;
   3733 }
   3734 /*
   3735  * JITs a call to a C function.
   3736  * TODO: use this for faster native method invocation for simple native
   3737  * methods (http://b/3069458).
   3738  */
   3739 static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
   3740 {
   3741     DecodedInstruction *dInsn = &mir->dalvikInsn;
   3742     int operation = dInsn->vB;
   3743     unsigned int i;
   3744     const InlineOperation* inLineTable = dvmGetInlineOpsTable();
    3745     uintptr_t fn = (uintptr_t) inLineTable[operation].func;
   3746     if (fn == 0) {
   3747         dvmCompilerAbort(cUnit);
   3748     }
   3749     dvmCompilerFlushAllRegs(cUnit);   /* Everything to home location */
   3750     dvmCompilerClobberCallRegs(cUnit);
   3751     dvmCompilerClobber(cUnit, r4PC);
   3752     dvmCompilerClobber(cUnit, rINST);
   3753     int offset = offsetof(Thread, interpSave.retval);
   3754     opRegRegImm(cUnit, kOpAdd, r4PC, rSELF, offset);
   3755     newLIR3(cUnit, kMipsSw, r4PC, 16, r_SP); /* sp has plenty of space */
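             /*
              * Note (assuming the MIPS o32 calling convention): the inline-op
              * helpers take a JValue* result pointer as a fifth argument, which
              * o32 passes on the stack, hence the store of &retval to 16(sp) above.
              */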
   3756     genExportPC(cUnit, mir);
   3757     assert(dInsn->vA <= 4);
   3758     for (i=0; i < dInsn->vA; i++) {
   3759         loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i+r_A0);
   3760     }
   3761     LOAD_FUNC_ADDR(cUnit, r_T9, fn);
   3762     opReg(cUnit, kOpBlx, r_T9);
   3763     newLIR3(cUnit, kMipsLw, r_GP, STACK_OFFSET_GP, r_SP);
   3764     /* NULL? */
   3765     MipsLIR *branchOver = opCompareBranch(cUnit, kMipsBne, r_V0, r_ZERO);
   3766     loadConstant(cUnit, r_A0, (int) (cUnit->method->insns + mir->offset));
   3767     genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
   3768     MipsLIR *target = newLIR0(cUnit, kMipsPseudoTargetLabel);
   3769     target->defMask = ENCODE_ALL;
   3770     branchOver->generic.target = (LIR *) target;
   3771     return false;
   3772 }
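
         /*
          * Argument marshalling sketch for the call above, assuming the o32
          * calling convention and the usual inline-helper signature of up to
          * four u4 arguments plus a result pointer: the Dalvik arguments are
          * loaded into a0-a3, and the pointer to self->interpSave.retval is
          * stored at 16(sp), the first stack slot past the 16 bytes the ABI
          * reserves for a0-a3 and the place where the callee expects its
          * fifth argument.
          */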
   3773 
   3774 /*
   3775  * NOTE: Handles both range and non-range versions (arguments
   3776  * have already been normalized by this point).
   3777  */
   3778 static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
   3779 {
   3780     DecodedInstruction *dInsn = &mir->dalvikInsn;
   3781     assert(dInsn->opcode == OP_EXECUTE_INLINE_RANGE ||
   3782            dInsn->opcode == OP_EXECUTE_INLINE);
   3783     switch (dInsn->vB) {
   3784         case INLINE_EMPTYINLINEMETHOD:
   3785             return false;  /* Nop */
   3786 
   3787         /* These ones we potentially JIT inline. */
   3788 
   3789         case INLINE_STRING_CHARAT:
   3790             return genInlinedStringCharAt(cUnit, mir);
   3791         case INLINE_STRING_LENGTH:
   3792             return genInlinedStringLength(cUnit, mir);
   3793         case INLINE_STRING_IS_EMPTY:
   3794             return genInlinedStringIsEmpty(cUnit, mir);
   3795         case INLINE_STRING_COMPARETO:
   3796             return genInlinedCompareTo(cUnit, mir);
   3797         case INLINE_STRING_FASTINDEXOF_II:
   3798             return genInlinedFastIndexOf(cUnit, mir);
   3799 
   3800         case INLINE_MATH_ABS_INT:
   3801         case INLINE_STRICT_MATH_ABS_INT:
   3802             return genInlinedAbsInt(cUnit, mir);
   3803         case INLINE_MATH_ABS_LONG:
   3804         case INLINE_STRICT_MATH_ABS_LONG:
   3805             return genInlinedAbsLong(cUnit, mir);
   3806         case INLINE_MATH_MIN_INT:
   3807         case INLINE_STRICT_MATH_MIN_INT:
   3808             return genInlinedMinMaxInt(cUnit, mir, true);
   3809         case INLINE_MATH_MAX_INT:
   3810         case INLINE_STRICT_MATH_MAX_INT:
   3811             return genInlinedMinMaxInt(cUnit, mir, false);
   3812         case INLINE_MATH_SQRT:
   3813         case INLINE_STRICT_MATH_SQRT:
   3814             return genInlineSqrt(cUnit, mir);
   3815         case INLINE_MATH_ABS_FLOAT:
   3816         case INLINE_STRICT_MATH_ABS_FLOAT:
   3817             return genInlinedAbsFloat(cUnit, mir);
   3818         case INLINE_MATH_ABS_DOUBLE:
   3819         case INLINE_STRICT_MATH_ABS_DOUBLE:
   3820             return genInlinedAbsDouble(cUnit, mir);
   3821 
   3822         case INLINE_FLOAT_TO_RAW_INT_BITS:
   3823         case INLINE_INT_BITS_TO_FLOAT:
   3824             return genInlinedIntFloatConversion(cUnit, mir);
   3825         case INLINE_DOUBLE_TO_RAW_LONG_BITS:
   3826         case INLINE_LONG_BITS_TO_DOUBLE:
   3827             return genInlinedLongDoubleConversion(cUnit, mir);
   3828 
   3829         /*
   3830          * These ones we just JIT a call to a C function for.
   3831          * TODO: special-case these in the other "invoke" call paths.
   3832          */
   3833         case INLINE_STRING_EQUALS:
   3834         case INLINE_MATH_COS:
   3835         case INLINE_MATH_SIN:
   3836         case INLINE_FLOAT_TO_INT_BITS:
   3837         case INLINE_DOUBLE_TO_LONG_BITS:
   3838             return handleExecuteInlineC(cUnit, mir);
   3839     }
   3840     dvmCompilerAbort(cUnit);
   3841     return false; // Not reachable; keeps compiler happy.
   3842 }
   3843 
   3844 static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
   3845 {
   3846     //TUNING: We're using core regs here - not optimal when target is a double
   3847     RegLocation rlDest = dvmCompilerGetDestWide(cUnit, mir, 0, 1);
   3848     RegLocation rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kCoreReg, true);
   3849     loadConstantNoClobber(cUnit, rlResult.lowReg,
   3850                           mir->dalvikInsn.vB_wide & 0xFFFFFFFFUL);
   3851     loadConstantNoClobber(cUnit, rlResult.highReg,
   3852                           (mir->dalvikInsn.vB_wide>>32) & 0xFFFFFFFFUL);
   3853     storeValueWide(cUnit, rlDest, rlResult);
   3854     return false;
   3855 }
   3856 
   3857 /*
    3858  * The following are special processing routines that handle transfers of
    3859  * control between compiled code and the interpreter. Certain VM state, such
    3860  * as the Dalvik PC and special-purpose registers, is reconstructed here.
   3861  */
   3862 
   3863 /* Chaining cell for code that may need warmup. */
   3864 static void handleNormalChainingCell(CompilationUnit *cUnit,
   3865                                      unsigned int offset)
   3866 {
   3867     newLIR3(cUnit, kMipsLw, r_A0,
   3868         offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal),
   3869         rSELF);
   3870     newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
   3871     addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
   3872 }
   3873 
   3874 /*
    3875  * Chaining cell for instructions that immediately follow already translated
    3876  * code.
   3877  */
   3878 static void handleHotChainingCell(CompilationUnit *cUnit,
   3879                                   unsigned int offset)
   3880 {
   3881     newLIR3(cUnit, kMipsLw, r_A0,
   3882         offsetof(Thread, jitToInterpEntries.dvmJitToInterpTraceSelect),
   3883         rSELF);
   3884     newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
   3885     addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
   3886 }
   3887 
   3888 /* Chaining cell for branches that branch back into the same basic block */
   3889 static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
   3890                                              unsigned int offset)
   3891 {
   3892     /*
   3893      * Use raw instruction constructors to guarantee that the generated
   3894      * instructions fit the predefined cell size.
   3895      */
   3896 #if defined(WITH_SELF_VERIFICATION)
   3897     newLIR3(cUnit, kMipsLw, r_A0,
   3898         offsetof(Thread, jitToInterpEntries.dvmJitToInterpBackwardBranch),
   3899         rSELF);
   3900 #else
   3901     newLIR3(cUnit, kMipsLw, r_A0,
   3902         offsetof(Thread, jitToInterpEntries.dvmJitToInterpNormal),
   3903         rSELF);
   3904 #endif
   3905     newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
   3906     addWordData(cUnit, NULL, (int) (cUnit->method->insns + offset));
   3907 }
   3908 
   3909 /* Chaining cell for monomorphic method invocations. */
   3910 static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
   3911                                               const Method *callee)
   3912 {
   3913     newLIR3(cUnit, kMipsLw, r_A0,
   3914         offsetof(Thread, jitToInterpEntries.dvmJitToInterpTraceSelect),
   3915         rSELF);
   3916     newLIR2(cUnit, kMipsJalr, r_RA, r_A0);
   3917     addWordData(cUnit, NULL, (int) (callee->insns));
   3918 }
   3919 
    3920 /* Chaining cell for polymorphic (predicted) method invocations. */
   3921 static void handleInvokePredictedChainingCell(CompilationUnit *cUnit)
   3922 {
   3923     /* Should not be executed in the initial state */
   3924     addWordData(cUnit, NULL, PREDICTED_CHAIN_BX_PAIR_INIT);
   3925     /* branch delay slot nop */
   3926     addWordData(cUnit, NULL, PREDICTED_CHAIN_DELAY_SLOT_INIT);
   3927     /* To be filled: class */
   3928     addWordData(cUnit, NULL, PREDICTED_CHAIN_CLAZZ_INIT);
   3929     /* To be filled: method */
   3930     addWordData(cUnit, NULL, PREDICTED_CHAIN_METHOD_INIT);
   3931     /*
   3932      * Rechain count. The initial value of 0 here will trigger chaining upon
   3933      * the first invocation of this callsite.
   3934      */
   3935     addWordData(cUnit, NULL, PREDICTED_CHAIN_COUNTER_INIT);
   3936 }
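
         /*
          * Rough shape of the cell once a callee has been chained in (an
          * illustrative sketch; the actual patching happens later, at
          * chain/patch time, not in this routine):
          *
          *     0x00: b      <callee translation>     <- patched branch
          *     0x04: nop                             <- branch delay slot
          *     0x08: .word  <predicted ClassObject*> <- compared against "this"
          *     0x0c: .word  <predicted Method*>
          *     0x10: .word  <rechain count>
          */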
   3937 
   3938 /* Load the Dalvik PC into a0 and jump to the specified target */
   3939 static void handlePCReconstruction(CompilationUnit *cUnit,
   3940                                    MipsLIR *targetLabel)
   3941 {
   3942     MipsLIR **pcrLabel =
   3943         (MipsLIR **) cUnit->pcReconstructionList.elemList;
   3944     int numElems = cUnit->pcReconstructionList.numUsed;
   3945     int i;
   3946 
   3947     /*
   3948      * We should never reach here through fall-through code, so insert
   3949      * a bomb to signal troubles immediately.
   3950      */
   3951     if (numElems) {
   3952         newLIR0(cUnit, kMipsUndefined);
   3953     }
   3954 
   3955     for (i = 0; i < numElems; i++) {
   3956         dvmCompilerAppendLIR(cUnit, (LIR *) pcrLabel[i]);
   3957         /* a0 = dalvik PC */
   3958         loadConstant(cUnit, r_A0, pcrLabel[i]->operands[0]);
   3959         genUnconditionalBranch(cUnit, targetLabel);
   3960     }
   3961 }
   3962 
   3963 static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
   3964     "kMirOpPhi",
   3965     "kMirOpNullNRangeUpCheck",
   3966     "kMirOpNullNRangeDownCheck",
   3967     "kMirOpLowerBound",
   3968     "kMirOpPunt",
   3969     "kMirOpCheckInlinePrediction",
   3970 };
   3971 
   3972 /*
   3973  * vA = arrayReg;
   3974  * vB = idxReg;
   3975  * vC = endConditionReg;
   3976  * arg[0] = maxC
   3977  * arg[1] = minC
   3978  * arg[2] = loopBranchConditionCode
   3979  */
   3980 static void genHoistedChecksForCountUpLoop(CompilationUnit *cUnit, MIR *mir)
   3981 {
   3982     /*
   3983      * NOTE: these synthesized blocks don't have ssa names assigned
   3984      * for Dalvik registers.  However, because they dominate the following
   3985      * blocks we can simply use the Dalvik name w/ subscript 0 as the
   3986      * ssa name.
   3987      */
   3988     DecodedInstruction *dInsn = &mir->dalvikInsn;
   3989     const int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
   3990     const int maxC = dInsn->arg[0];
   3991     int regLength;
   3992     RegLocation rlArray = cUnit->regLocation[mir->dalvikInsn.vA];
   3993     RegLocation rlIdxEnd = cUnit->regLocation[mir->dalvikInsn.vC];
   3994 
   3995     /* regArray <- arrayRef */
   3996     rlArray = loadValue(cUnit, rlArray, kCoreReg);
   3997     rlIdxEnd = loadValue(cUnit, rlIdxEnd, kCoreReg);
   3998     genRegImmCheck(cUnit, kMipsCondEq, rlArray.lowReg, 0, 0,
   3999                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4000 
   4001     /* regLength <- len(arrayRef) */
   4002     regLength = dvmCompilerAllocTemp(cUnit);
   4003     loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLength);
   4004 
   4005     int delta = maxC;
   4006     /*
   4007      * If the loop end condition is ">=" instead of ">", then the largest value
   4008      * of the index is "endCondition - 1".
   4009      */
   4010     if (dInsn->arg[2] == OP_IF_GE) {
   4011         delta--;
   4012     }
   4013 
   4014     if (delta) {
   4015         int tReg = dvmCompilerAllocTemp(cUnit);
   4016         opRegRegImm(cUnit, kOpAdd, tReg, rlIdxEnd.lowReg, delta);
   4017         rlIdxEnd.lowReg = tReg;
   4018         dvmCompilerFreeTemp(cUnit, tReg);
   4019     }
   4020     /* Punt if "regIdxEnd < len(Array)" is false */
   4021     genRegRegCheck(cUnit, kMipsCondGe, rlIdxEnd.lowReg, regLength, 0,
   4022                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4023 }
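
         /*
          * Worked example (illustrative only): for a loop body equivalent to
          *
          *     for (i = 0; i < end; i++) sum += a[i + 2];    // maxC == 2
          *
          * the loop ends on "i >= end" (OP_IF_GE), so the largest index ever
          * used is (end - 1) + 2.  The hoisted check above therefore punts to
          * the interpreter unless end + 1 < a.length, which lets the null and
          * range checks inside the loop body be elided.
          */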
   4024 
   4025 /*
   4026  * vA = arrayReg;
   4027  * vB = idxReg;
   4028  * vC = endConditionReg;
   4029  * arg[0] = maxC
   4030  * arg[1] = minC
   4031  * arg[2] = loopBranchConditionCode
   4032  */
   4033 static void genHoistedChecksForCountDownLoop(CompilationUnit *cUnit, MIR *mir)
   4034 {
   4035     DecodedInstruction *dInsn = &mir->dalvikInsn;
   4036     const int lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
   4037     const int regLength = dvmCompilerAllocTemp(cUnit);
   4038     const int maxC = dInsn->arg[0];
   4039     RegLocation rlArray = cUnit->regLocation[mir->dalvikInsn.vA];
   4040     RegLocation rlIdxInit = cUnit->regLocation[mir->dalvikInsn.vB];
   4041 
   4042     /* regArray <- arrayRef */
   4043     rlArray = loadValue(cUnit, rlArray, kCoreReg);
   4044     rlIdxInit = loadValue(cUnit, rlIdxInit, kCoreReg);
   4045     genRegImmCheck(cUnit, kMipsCondEq, rlArray.lowReg, 0, 0,
   4046                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4047 
   4048     /* regLength <- len(arrayRef) */
   4049     loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLength);
   4050 
   4051     if (maxC) {
   4052         int tReg = dvmCompilerAllocTemp(cUnit);
   4053         opRegRegImm(cUnit, kOpAdd, tReg, rlIdxInit.lowReg, maxC);
   4054         rlIdxInit.lowReg = tReg;
   4055         dvmCompilerFreeTemp(cUnit, tReg);
   4056     }
   4057 
   4058     /* Punt if "regIdxInit < len(Array)" is false */
   4059     genRegRegCheck(cUnit, kMipsCondGe, rlIdxInit.lowReg, regLength, 0,
   4060                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4061 }
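
         /*
          * Worked example (illustrative only): for a count-down loop such as
          *
          *     for (i = init; i >= 0; i--) sum += a[i + 1];  // maxC == 1
          *
          * the largest index used is init + 1, so the check above punts unless
          * init + 1 < a.length.  The lower bound is covered separately by the
          * kMirOpLowerBound check generated below.
          */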
   4062 
   4063 /*
   4064  * vA = idxReg;
   4065  * vB = minC;
   4066  */
   4067 static void genHoistedLowerBoundCheck(CompilationUnit *cUnit, MIR *mir)
   4068 {
   4069     DecodedInstruction *dInsn = &mir->dalvikInsn;
   4070     const int minC = dInsn->vB;
   4071     RegLocation rlIdx = cUnit->regLocation[mir->dalvikInsn.vA];
   4072 
   4073     /* regIdx <- initial index value */
   4074     rlIdx = loadValue(cUnit, rlIdx, kCoreReg);
   4075 
   4076     /* Punt if "regIdxInit + minC >= 0" is false */
   4077     genRegImmCheck(cUnit, kMipsCondLt, rlIdx.lowReg, -minC, 0,
   4078                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4079 }
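
         /*
          * Worked example (illustrative only): an access of the form a[i - 2]
          * yields minC == -2, so the check above punts to the interpreter
          * whenever the initial index value satisfies i < 2, i.e. whenever
          * i + minC would drop below zero.
          */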
   4080 
   4081 /*
   4082  * vC = this
   4083  *
   4084  * A predicted inlining target looks like the following, where instructions
   4085  * between 0x2f130d24 and 0x2f130d40 are checking if the predicted class
    4086  * matches "this", and the verification code is generated by this routine.
   4087  *
   4088  * (C) means the instruction is inlined from the callee, and (PI) means the
   4089  * instruction is the predicted inlined invoke, whose corresponding
   4090  * instructions are still generated to handle the mispredicted case.
   4091  *
   4092  * D/dalvikvm( 2377): -------- kMirOpCheckInlinePrediction
   4093  * D/dalvikvm( 2377): 0x2f130d24 (0020):  lw       v0,16(s1)
   4094  * D/dalvikvm( 2377): 0x2f130d28 (0024):  lui      v1,0x0011(17)
   4095  * D/dalvikvm( 2377): 0x2f130d2c (0028):  ori      v1,v1,0x11e418(1172504)
   4096  * D/dalvikvm( 2377): 0x2f130d30 (002c):  beqz     v0,0x2f130df0 (L0x11f1f0)
   4097  * D/dalvikvm( 2377): 0x2f130d34 (0030):  pref     0,0(v0)
   4098  * D/dalvikvm( 2377): 0x2f130d38 (0034):  lw       a0,0(v0)
   4099  * D/dalvikvm( 2377): 0x2f130d3c (0038):  bne      v1,a0,0x2f130d54 (L0x11f518)
   4100  * D/dalvikvm( 2377): 0x2f130d40 (003c):  pref     0,8(v0)
   4101  * D/dalvikvm( 2377): -------- dalvik offset: 0x000a @ +iget-object-quick (C) v3, v4, (#8)
   4102  * D/dalvikvm( 2377): 0x2f130d44 (0040):  lw       a1,8(v0)
   4103  * D/dalvikvm( 2377): -------- dalvik offset: 0x000a @ +invoke-virtual-quick (PI) v4
   4104  * D/dalvikvm( 2377): 0x2f130d48 (0044):  sw       a1,12(s1)
   4105  * D/dalvikvm( 2377): 0x2f130d4c (0048):  b        0x2f130e18 (L0x120150)
   4106  * D/dalvikvm( 2377): 0x2f130d50 (004c):  lw       a0,116(s2)
   4107  * D/dalvikvm( 2377): L0x11f518:
   4108  * D/dalvikvm( 2377): 0x2f130d54 (0050):  lw       a0,16(s1)
   4109  * D/dalvikvm( 2377): 0x2f130d58 (0054):  addiu    s4,s1,0xffffffe8(-24)
   4110  * D/dalvikvm( 2377): 0x2f130d5c (0058):  beqz     a0,0x2f130e00 (L0x11f618)
   4111  * D/dalvikvm( 2377): 0x2f130d60 (005c):  pref     1,0(s4)
   4112  * D/dalvikvm( 2377): -------- BARRIER
   4113  * D/dalvikvm( 2377): 0x2f130d64 (0060):  sw       a0,0(s4)
   4114  * D/dalvikvm( 2377): 0x2f130d68 (0064):  addiu    s4,s4,0x0004(4)
   4115  * D/dalvikvm( 2377): -------- BARRIER
   4116  * D/dalvikvm( 2377): 0x2f130d6c (0068):  lui      s0,0x2d22(11554)
   4117  * D/dalvikvm( 2377): 0x2f130d70 (006c):  ori      s0,s0,0x2d228464(757236836)
   4118  * D/dalvikvm( 2377): 0x2f130d74 (0070):  lahi/lui a1,0x2f13(12051)
   4119  * D/dalvikvm( 2377): 0x2f130d78 (0074):  lalo/ori a1,a1,0x2f130ddc(789777884)
   4120  * D/dalvikvm( 2377): 0x2f130d7c (0078):  lahi/lui a2,0x2f13(12051)
   4121  * D/dalvikvm( 2377): 0x2f130d80 (007c):  lalo/ori a2,a2,0x2f130e24(789777956)
   4122  * D/dalvikvm( 2377): 0x2f130d84 (0080):  jal      0x2f12d1ec(789762540)
   4123  * D/dalvikvm( 2377): 0x2f130d88 (0084):  nop
   4124  * D/dalvikvm( 2377): 0x2f130d8c (0088):  b        0x2f130e24 (L0x11ed6c)
   4125  * D/dalvikvm( 2377): 0x2f130d90 (008c):  nop
   4126  * D/dalvikvm( 2377): 0x2f130d94 (0090):  b        0x2f130e04 (L0x11ffd0)
   4127  * D/dalvikvm( 2377): 0x2f130d98 (0094):  lui      a0,0x2d22(11554)
   4128  * D/dalvikvm( 2377): 0x2f130d9c (0098):  lw       a0,44(s4)
   4129  * D/dalvikvm( 2377): 0x2f130da0 (009c):  bgtz     a1,0x2f130dc4 (L0x11fb98)
   4130  * D/dalvikvm( 2377): 0x2f130da4 (00a0):  nop
   4131  * D/dalvikvm( 2377): 0x2f130da8 (00a4):  lui      t9,0x2aba(10938)
   4132  * D/dalvikvm( 2377): 0x2f130dac (00a8):  ori      t9,t9,0x2abae3f8(716891128)
   4133  * D/dalvikvm( 2377): 0x2f130db0 (00ac):  move     a1,s2
   4134  * D/dalvikvm( 2377): 0x2f130db4 (00b0):  jalr     ra,t9
   4135  * D/dalvikvm( 2377): 0x2f130db8 (00b4):  nop
   4136  * D/dalvikvm( 2377): 0x2f130dbc (00b8):  lw       gp,84(sp)
   4137  * D/dalvikvm( 2377): 0x2f130dc0 (00bc):  move     a0,v0
   4138  * D/dalvikvm( 2377): 0x2f130dc4 (00c0):  lahi/lui a1,0x2f13(12051)
   4139  * D/dalvikvm( 2377): 0x2f130dc8 (00c4):  lalo/ori a1,a1,0x2f130ddc(789777884)
   4140  * D/dalvikvm( 2377): 0x2f130dcc (00c8):  jal      0x2f12d0c4(789762244)
   4141  * D/dalvikvm( 2377): 0x2f130dd0 (00cc):  nop
   4142  * D/dalvikvm( 2377): 0x2f130dd4 (00d0):  b        0x2f130e04 (L0x11ffd0)
   4143  * D/dalvikvm( 2377): 0x2f130dd8 (00d4):  lui      a0,0x2d22(11554)
   4144  * D/dalvikvm( 2377): 0x2f130ddc (00d8): .align4
   4145  * D/dalvikvm( 2377): L0x11ed2c:
   4146  * D/dalvikvm( 2377): -------- dalvik offset: 0x000d @ move-result-object (PI) v3, (#0), (#0)
   4147  * D/dalvikvm( 2377): 0x2f130ddc (00d8):  lw       a2,16(s2)
   4148  * D/dalvikvm( 2377): 0x2f130de0 (00dc):  sw       a2,12(s1)
   4149  * D/dalvikvm( 2377): 0x2f130de4 (00e0):  b        0x2f130e18 (L0x120150)
   4150  * D/dalvikvm( 2377): 0x2f130de8 (00e4):  lw       a0,116(s2)
   4151  * D/dalvikvm( 2377): 0x2f130dec (00e8):  undefined
   4152  * D/dalvikvm( 2377): L0x11f1f0:
   4153  * D/dalvikvm( 2377): -------- reconstruct dalvik PC : 0x2d228464 @ +0x000a
   4154  * D/dalvikvm( 2377): 0x2f130df0 (00ec):  lui      a0,0x2d22(11554)
   4155  * D/dalvikvm( 2377): 0x2f130df4 (00f0):  ori      a0,a0,0x2d228464(757236836)
   4156  * D/dalvikvm( 2377): 0x2f130df8 (00f4):  b        0x2f130e0c (L0x120090)
   4157  * D/dalvikvm( 2377): 0x2f130dfc (00f8):  lw       a1,108(s2)
   4158  * D/dalvikvm( 2377): L0x11f618:
   4159  * D/dalvikvm( 2377): -------- reconstruct dalvik PC : 0x2d228464 @ +0x000a
   4160  * D/dalvikvm( 2377): 0x2f130e00 (00fc):  lui      a0,0x2d22(11554)
   4161  * D/dalvikvm( 2377): 0x2f130e04 (0100):  ori      a0,a0,0x2d228464(757236836)
   4162  * D/dalvikvm( 2377): Exception_Handling:
   4163  * D/dalvikvm( 2377): 0x2f130e08 (0104):  lw       a1,108(s2)
   4164  * D/dalvikvm( 2377): 0x2f130e0c (0108):  jalr     ra,a1
   4165  * D/dalvikvm( 2377): 0x2f130e10 (010c):  nop
   4166  * D/dalvikvm( 2377): 0x2f130e14 (0110): .align4
   4167  * D/dalvikvm( 2377): L0x11edac:
   4168  * D/dalvikvm( 2377): -------- chaining cell (hot): 0x000e
   4169  * D/dalvikvm( 2377): 0x2f130e14 (0110):  lw       a0,116(s2)
   4170  * D/dalvikvm( 2377): 0x2f130e18 (0114):  jalr     ra,a0
   4171  * D/dalvikvm( 2377): 0x2f130e1c (0118):  nop
   4172  * D/dalvikvm( 2377): 0x2f130e20 (011c):  data     0x2d22846c(757236844)
   4173  * D/dalvikvm( 2377): 0x2f130e24 (0120): .align4
   4174  * D/dalvikvm( 2377): L0x11ed6c:
   4175  * D/dalvikvm( 2377): -------- chaining cell (predicted)
   4176  * D/dalvikvm( 2377): 0x2f130e24 (0120):  data     0xe7fe(59390)
   4177  * D/dalvikvm( 2377): 0x2f130e28 (0124):  data     0x0000(0)
   4178  * D/dalvikvm( 2377): 0x2f130e2c (0128):  data     0x0000(0)
   4179  * D/dalvikvm( 2377): 0x2f130e30 (012c):  data     0x0000(0)
   4180  * D/dalvikvm( 2377): 0x2f130e34 (0130):  data     0x0000(0)
   4181  */
   4182 static void genValidationForPredictedInline(CompilationUnit *cUnit, MIR *mir)
   4183 {
   4184     CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
   4185     RegLocation rlThis = cUnit->regLocation[mir->dalvikInsn.vC];
   4186 
   4187     rlThis = loadValue(cUnit, rlThis, kCoreReg);
   4188     int regPredictedClass = dvmCompilerAllocTemp(cUnit);
   4189     loadClassPointer(cUnit, regPredictedClass, (int) callsiteInfo);
   4190     genNullCheck(cUnit, rlThis.sRegLow, rlThis.lowReg, mir->offset,
   4191                  NULL);/* null object? */
   4192     int regActualClass = dvmCompilerAllocTemp(cUnit);
   4193     loadWordDisp(cUnit, rlThis.lowReg, offsetof(Object, clazz), regActualClass);
   4194 //    opRegReg(cUnit, kOpCmp, regPredictedClass, regActualClass);
   4195     /*
   4196      * Set the misPredBranchOver target so that it will be generated when the
   4197      * code for the non-optimized invoke is generated.
   4198      */
   4199     callsiteInfo->misPredBranchOver = (LIR *) opCompareBranch(cUnit, kMipsBne, regPredictedClass, regActualClass);
   4200 }
   4201 
   4202 /* Extended MIR instructions like PHI */
   4203 static void handleExtendedMIR(CompilationUnit *cUnit, MIR *mir)
   4204 {
   4205     int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
   4206     char *msg = (char *)dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
   4207                                        false);
   4208     strcpy(msg, extendedMIROpNames[opOffset]);
   4209     newLIR1(cUnit, kMipsPseudoExtended, (int) msg);
   4210 
   4211     switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
   4212         case kMirOpPhi: {
   4213             char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
   4214             newLIR1(cUnit, kMipsPseudoSSARep, (int) ssaString);
   4215             break;
   4216         }
   4217         case kMirOpNullNRangeUpCheck: {
   4218             genHoistedChecksForCountUpLoop(cUnit, mir);
   4219             break;
   4220         }
   4221         case kMirOpNullNRangeDownCheck: {
   4222             genHoistedChecksForCountDownLoop(cUnit, mir);
   4223             break;
   4224         }
   4225         case kMirOpLowerBound: {
   4226             genHoistedLowerBoundCheck(cUnit, mir);
   4227             break;
   4228         }
   4229         case kMirOpPunt: {
   4230             genUnconditionalBranch(cUnit,
   4231                                    (MipsLIR *) cUnit->loopAnalysis->branchToPCR);
   4232             break;
   4233         }
   4234         case kMirOpCheckInlinePrediction: {
   4235             genValidationForPredictedInline(cUnit, mir);
   4236             break;
   4237         }
   4238         default:
   4239             break;
   4240     }
   4241 }
   4242 
   4243 /*
   4244  * Create a PC-reconstruction cell for the starting offset of this trace.
   4245  * Since the PCR cell is placed near the end of the compiled code which is
   4246  * usually out of range for a conditional branch, we put two branches (one
   4247  * branch over to the loop body and one layover branch to the actual PCR) at the
   4248  * end of the entry block.
   4249  */
   4250 static void setupLoopEntryBlock(CompilationUnit *cUnit, BasicBlock *entry,
   4251                                 MipsLIR *bodyLabel)
   4252 {
   4253     /* Set up the place holder to reconstruct this Dalvik PC */
   4254     MipsLIR *pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   4255     pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
   4256     pcrLabel->operands[0] =
   4257         (int) (cUnit->method->insns + entry->startOffset);
   4258     pcrLabel->operands[1] = entry->startOffset;
   4259     /* Insert the place holder to the growable list */
   4260     dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
   4261 
   4262     /*
   4263      * Next, create two branches - one branch over to the loop body and the
   4264      * other branch to the PCR cell to punt.
   4265      */
   4266     MipsLIR *branchToBody = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   4267     branchToBody->opcode = kMipsB;
   4268     branchToBody->generic.target = (LIR *) bodyLabel;
   4269     setupResourceMasks(branchToBody);
   4270     cUnit->loopAnalysis->branchToBody = (LIR *) branchToBody;
   4271 
   4272     MipsLIR *branchToPCR = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
   4273     branchToPCR->opcode = kMipsB;
   4274     branchToPCR->generic.target = (LIR *) pcrLabel;
   4275     setupResourceMasks(branchToPCR);
   4276     cUnit->loopAnalysis->branchToPCR = (LIR *) branchToPCR;
   4277 }
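
         /*
          * Resulting layout at the end of the entry block (sketch):
          *
          *     <trace entry + profiling code>
          *     b   <loop body label>          ; branchToBody
          *     b   <PC reconstruction cell>   ; branchToPCR, the punt target
          *                                    ; used by the hoisted checks
          *     <loop body ...>
          */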
   4278 
   4279 #if defined(WITH_SELF_VERIFICATION)
   4280 static bool selfVerificationPuntOps(MIR *mir)
   4281 {
    4282     assert(0); /* MIPSTODO port selfVerificationPuntOps() */
   4283     DecodedInstruction *decInsn = &mir->dalvikInsn;
   4284 
   4285     /*
   4286      * All opcodes that can throw exceptions and use the
   4287      * TEMPLATE_THROW_EXCEPTION_COMMON template should be excluded in the trace
   4288      * under self-verification mode.
   4289      */
   4290     switch (decInsn->opcode) {
   4291         case OP_MONITOR_ENTER:
   4292         case OP_MONITOR_EXIT:
   4293         case OP_NEW_INSTANCE:
   4294         case OP_NEW_ARRAY:
   4295         case OP_CHECK_CAST:
   4296         case OP_MOVE_EXCEPTION:
   4297         case OP_FILL_ARRAY_DATA:
   4298         case OP_EXECUTE_INLINE:
   4299         case OP_EXECUTE_INLINE_RANGE:
   4300             return true;
   4301         default:
   4302             return false;
   4303     }
   4304 }
   4305 #endif
   4306 
   4307 void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
   4308 {
   4309     /* Used to hold the labels of each block */
   4310     MipsLIR *labelList =
   4311         (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR) * cUnit->numBlocks, true);
   4312     MipsLIR *headLIR = NULL;
   4313     GrowableList chainingListByType[kChainingCellGap];
   4314     int i;
   4315 
   4316     /*
    4317      * Initialize the various types of chaining lists.
   4318      */
   4319     for (i = 0; i < kChainingCellGap; i++) {
   4320         dvmInitGrowableList(&chainingListByType[i], 2);
   4321     }
   4322 
   4323     /* Clear the visited flag for each block */
   4324     dvmCompilerDataFlowAnalysisDispatcher(cUnit, dvmCompilerClearVisitedFlag,
   4325                                           kAllNodes, false /* isIterative */);
   4326 
   4327     GrowableListIterator iterator;
   4328     dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
   4329 
   4330     /* Traces start with a profiling entry point.  Generate it here */
   4331     cUnit->profileCodeSize = genTraceProfileEntry(cUnit);
   4332 
   4333     /* Handle the content in each basic block */
   4334     for (i = 0; ; i++) {
   4335         MIR *mir;
   4336         BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
   4337         if (bb == NULL) break;
   4338         if (bb->visited == true) continue;
   4339 
   4340         labelList[i].operands[0] = bb->startOffset;
   4341 
   4342         if (bb->blockType >= kChainingCellGap) {
   4343             if (bb->isFallThroughFromInvoke == true) {
   4344                 /* Align this block first since it is a return chaining cell */
   4345                 newLIR0(cUnit, kMipsPseudoPseudoAlign4);
   4346             }
   4347             /*
   4348              * Append the label pseudo LIR first. Chaining cells will be handled
   4349              * separately afterwards.
   4350              */
   4351             dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]);
   4352         }
   4353 
   4354         if (bb->blockType == kEntryBlock) {
   4355             labelList[i].opcode = kMipsPseudoEntryBlock;
   4356             if (bb->firstMIRInsn == NULL) {
   4357                 continue;
   4358             } else {
   4359               setupLoopEntryBlock(cUnit, bb,
   4360                                   &labelList[bb->fallThrough->id]);
   4361             }
   4362         } else if (bb->blockType == kExitBlock) {
   4363             labelList[i].opcode = kMipsPseudoExitBlock;
   4364             goto gen_fallthrough;
   4365         } else if (bb->blockType == kDalvikByteCode) {
   4366             if (bb->hidden == true) continue;
   4367             labelList[i].opcode = kMipsPseudoNormalBlockLabel;
   4368             /* Reset the register state */
   4369             dvmCompilerResetRegPool(cUnit);
   4370             dvmCompilerClobberAllRegs(cUnit);
   4371             dvmCompilerResetNullCheck(cUnit);
   4372         } else {
   4373             switch (bb->blockType) {
   4374                 case kChainingCellNormal:
   4375                     labelList[i].opcode = kMipsPseudoChainingCellNormal;
   4376                     /* handle the codegen later */
   4377                     dvmInsertGrowableList(
   4378                         &chainingListByType[kChainingCellNormal], i);
   4379                     break;
   4380                 case kChainingCellInvokeSingleton:
   4381                     labelList[i].opcode =
   4382                         kMipsPseudoChainingCellInvokeSingleton;
   4383                     labelList[i].operands[0] =
   4384                         (int) bb->containingMethod;
   4385                     /* handle the codegen later */
   4386                     dvmInsertGrowableList(
   4387                         &chainingListByType[kChainingCellInvokeSingleton], i);
   4388                     break;
   4389                 case kChainingCellInvokePredicted:
   4390                     labelList[i].opcode =
   4391                         kMipsPseudoChainingCellInvokePredicted;
   4392                     /*
   4393                      * Move the cached method pointer from operand 1 to 0.
   4394                      * Operand 0 was clobbered earlier in this routine to store
    4395                      * the block starting offset, which is not applicable to a
    4396                      * predicted chaining cell.
   4397                      */
   4398                     labelList[i].operands[0] = labelList[i].operands[1];
   4399                     /* handle the codegen later */
   4400                     dvmInsertGrowableList(
   4401                         &chainingListByType[kChainingCellInvokePredicted], i);
   4402                     break;
   4403                 case kChainingCellHot:
   4404                     labelList[i].opcode =
   4405                         kMipsPseudoChainingCellHot;
   4406                     /* handle the codegen later */
   4407                     dvmInsertGrowableList(
   4408                         &chainingListByType[kChainingCellHot], i);
   4409                     break;
   4410                 case kPCReconstruction:
   4411                     /* Make sure exception handling block is next */
   4412                     labelList[i].opcode =
   4413                         kMipsPseudoPCReconstructionBlockLabel;
   4414                     handlePCReconstruction(cUnit,
   4415                                            &labelList[cUnit->puntBlock->id]);
   4416                     break;
   4417                 case kExceptionHandling:
   4418                     labelList[i].opcode = kMipsPseudoEHBlockLabel;
   4419                     if (cUnit->pcReconstructionList.numUsed) {
   4420                         loadWordDisp(cUnit, rSELF, offsetof(Thread,
   4421                                      jitToInterpEntries.dvmJitToInterpPunt),
   4422                                      r_A1);
   4423                         opReg(cUnit, kOpBlx, r_A1);
   4424                     }
   4425                     break;
   4426                 case kChainingCellBackwardBranch:
   4427                     labelList[i].opcode =
   4428                         kMipsPseudoChainingCellBackwardBranch;
   4429                     /* handle the codegen later */
   4430                     dvmInsertGrowableList(
   4431                         &chainingListByType[kChainingCellBackwardBranch],
   4432                         i);
   4433                     break;
   4434                 default:
   4435                     break;
   4436             }
   4437             continue;
   4438         }
   4439 
   4440         /*
   4441          * Try to build a longer optimization unit. Currently if the previous
   4442          * block ends with a goto, we continue adding instructions and don't
   4443          * reset the register allocation pool.
   4444          */
   4445         for (BasicBlock *nextBB = bb; nextBB != NULL; nextBB = cUnit->nextCodegenBlock) {
   4446             bb = nextBB;
   4447             bb->visited = true;
   4448             cUnit->nextCodegenBlock = NULL;
   4449 
   4450             for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
   4451 
   4452                 dvmCompilerResetRegPool(cUnit);
   4453                 if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
   4454                     dvmCompilerClobberAllRegs(cUnit);
   4455                 }
   4456 
   4457                 if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
   4458                     dvmCompilerResetDefTracking(cUnit);
   4459                 }
   4460 
   4461                 if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
   4462                     handleExtendedMIR(cUnit, mir);
   4463                     continue;
   4464                 }
   4465 
   4466                 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
   4467                 InstructionFormat dalvikFormat =
   4468                     dexGetFormatFromOpcode(dalvikOpcode);
   4469                 const char *note;
   4470                 if (mir->OptimizationFlags & MIR_INLINED) {
   4471                     note = " (I)";
   4472                 } else if (mir->OptimizationFlags & MIR_INLINED_PRED) {
   4473                     note = " (PI)";
   4474                 } else if (mir->OptimizationFlags & MIR_CALLEE) {
   4475                     note = " (C)";
   4476                 } else {
   4477                     note = NULL;
   4478                 }
   4479 
   4480                 MipsLIR *boundaryLIR =
   4481                     newLIR2(cUnit, kMipsPseudoDalvikByteCodeBoundary,
   4482                             mir->offset,
   4483                             (int) dvmCompilerGetDalvikDisassembly(&mir->dalvikInsn,
   4484                                                                   note));
   4485                 if (mir->ssaRep) {
   4486                     char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
   4487                     newLIR1(cUnit, kMipsPseudoSSARep, (int) ssaString);
   4488                 }
   4489 
   4490                 /* Remember the first LIR for this block */
   4491                 if (headLIR == NULL) {
   4492                     headLIR = boundaryLIR;
   4493                     /* Set the first boundaryLIR as a scheduling barrier */
   4494                     headLIR->defMask = ENCODE_ALL;
   4495                 }
   4496 
   4497                 bool notHandled;
   4498                 /*
   4499                  * Debugging: screen the opcode first to see if it is in the
   4500                  * do[-not]-compile list
   4501                  */
   4502                 bool singleStepMe = SINGLE_STEP_OP(dalvikOpcode);
   4503 #if defined(WITH_SELF_VERIFICATION)
    4504                 if (singleStepMe == false) {
    4505                     singleStepMe = selfVerificationPuntOps(mir);
    4506                 }
   4507 #endif
   4508                 if (singleStepMe || cUnit->allSingleStep) {
   4509                     notHandled = false;
   4510                     genInterpSingleStep(cUnit, mir);
   4511                 } else {
   4512                     opcodeCoverage[dalvikOpcode]++;
   4513                     switch (dalvikFormat) {
   4514                         case kFmt10t:
   4515                         case kFmt20t:
   4516                         case kFmt30t:
   4517                             notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
   4518                                       mir, bb, labelList);
   4519                             break;
   4520                         case kFmt10x:
   4521                             notHandled = handleFmt10x(cUnit, mir);
   4522                             break;
   4523                         case kFmt11n:
   4524                         case kFmt31i:
   4525                             notHandled = handleFmt11n_Fmt31i(cUnit, mir);
   4526                             break;
   4527                         case kFmt11x:
   4528                             notHandled = handleFmt11x(cUnit, mir);
   4529                             break;
   4530                         case kFmt12x:
   4531                             notHandled = handleFmt12x(cUnit, mir);
   4532                             break;
   4533                         case kFmt20bc:
   4534                             notHandled = handleFmt20bc(cUnit, mir);
   4535                             break;
   4536                         case kFmt21c:
   4537                         case kFmt31c:
   4538                             notHandled = handleFmt21c_Fmt31c(cUnit, mir);
   4539                             break;
   4540                         case kFmt21h:
   4541                             notHandled = handleFmt21h(cUnit, mir);
   4542                             break;
   4543                         case kFmt21s:
   4544                             notHandled = handleFmt21s(cUnit, mir);
   4545                             break;
   4546                         case kFmt21t:
   4547                             notHandled = handleFmt21t(cUnit, mir, bb,
   4548                                                       labelList);
   4549                             break;
   4550                         case kFmt22b:
   4551                         case kFmt22s:
   4552                             notHandled = handleFmt22b_Fmt22s(cUnit, mir);
   4553                             break;
   4554                         case kFmt22c:
   4555                             notHandled = handleFmt22c(cUnit, mir);
   4556                             break;
   4557                         case kFmt22cs:
   4558                             notHandled = handleFmt22cs(cUnit, mir);
   4559                             break;
   4560                         case kFmt22t:
   4561                             notHandled = handleFmt22t(cUnit, mir, bb,
   4562                                                       labelList);
   4563                             break;
   4564                         case kFmt22x:
   4565                         case kFmt32x:
   4566                             notHandled = handleFmt22x_Fmt32x(cUnit, mir);
   4567                             break;
   4568                         case kFmt23x:
   4569                             notHandled = handleFmt23x(cUnit, mir);
   4570                             break;
   4571                         case kFmt31t:
   4572                             notHandled = handleFmt31t(cUnit, mir);
   4573                             break;
   4574                         case kFmt3rc:
   4575                         case kFmt35c:
   4576                             notHandled = handleFmt35c_3rc(cUnit, mir, bb,
   4577                                                           labelList);
   4578                             break;
   4579                         case kFmt3rms:
   4580                         case kFmt35ms:
   4581                             notHandled = handleFmt35ms_3rms(cUnit, mir,bb,
   4582                                                             labelList);
   4583                             break;
   4584                         case kFmt35mi:
   4585                         case kFmt3rmi:
   4586                             notHandled = handleExecuteInline(cUnit, mir);
   4587                             break;
   4588                         case kFmt51l:
   4589                             notHandled = handleFmt51l(cUnit, mir);
   4590                             break;
   4591                         default:
   4592                             notHandled = true;
   4593                             break;
   4594                     }
   4595                 }
   4596                 if (notHandled) {
   4597                     ALOGE("%#06x: Opcode %#x (%s) / Fmt %d not handled",
   4598                          mir->offset,
   4599                          dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
   4600                          dalvikFormat);
   4601                     dvmCompilerAbort(cUnit);
   4602                     break;
   4603                 }
   4604             }
   4605         }
   4606 
   4607         if (bb->blockType == kEntryBlock) {
   4608             dvmCompilerAppendLIR(cUnit,
   4609                                  (LIR *) cUnit->loopAnalysis->branchToBody);
   4610             dvmCompilerAppendLIR(cUnit,
   4611                                  (LIR *) cUnit->loopAnalysis->branchToPCR);
   4612         }
   4613 
   4614         if (headLIR) {
   4615             /*
   4616              * Eliminate redundant loads/stores and delay stores into later
   4617              * slots
   4618              */
   4619             dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR,
   4620                                                cUnit->lastLIRInsn);
   4621             /* Reset headLIR which is also the optimization boundary */
   4622             headLIR = NULL;
   4623         }
   4624 
   4625 gen_fallthrough:
   4626         /*
   4627          * Check if the block is terminated due to trace length constraint -
   4628          * insert an unconditional branch to the chaining cell.
   4629          */
   4630         if (bb->needFallThroughBranch) {
   4631             genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
   4632         }
   4633     }
   4634 
   4635     /* Handle the chaining cells in predefined order */
   4636     for (i = 0; i < kChainingCellGap; i++) {
   4637         size_t j;
   4638         int *blockIdList = (int *) chainingListByType[i].elemList;
   4639 
   4640         cUnit->numChainingCells[i] = chainingListByType[i].numUsed;
   4641 
   4642         /* No chaining cells of this type */
   4643         if (cUnit->numChainingCells[i] == 0)
   4644             continue;
   4645 
   4646         /* Record the first LIR for a new type of chaining cell */
   4647         cUnit->firstChainingLIR[i] = (LIR *) &labelList[blockIdList[0]];
   4648 
   4649         for (j = 0; j < chainingListByType[i].numUsed; j++) {
   4650             int blockId = blockIdList[j];
   4651             BasicBlock *chainingBlock =
   4652                 (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
   4653                                                          blockId);
   4654 
   4655             /* Align this chaining cell first */
   4656             newLIR0(cUnit, kMipsPseudoPseudoAlign4);
   4657 
   4658             /* Insert the pseudo chaining instruction */
   4659             dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
   4660 
   4661 
   4662             switch (chainingBlock->blockType) {
   4663                 case kChainingCellNormal:
   4664                     handleNormalChainingCell(cUnit, chainingBlock->startOffset);
   4665                     break;
   4666                 case kChainingCellInvokeSingleton:
   4667                     handleInvokeSingletonChainingCell(cUnit,
   4668                         chainingBlock->containingMethod);
   4669                     break;
   4670                 case kChainingCellInvokePredicted:
   4671                     handleInvokePredictedChainingCell(cUnit);
   4672                     break;
   4673                 case kChainingCellHot:
   4674                     handleHotChainingCell(cUnit, chainingBlock->startOffset);
   4675                     break;
   4676                 case kChainingCellBackwardBranch:
   4677                     handleBackwardBranchChainingCell(cUnit,
   4678                         chainingBlock->startOffset);
   4679                     break;
   4680                 default:
   4681                     ALOGE("Bad blocktype %d", chainingBlock->blockType);
   4682                     dvmCompilerAbort(cUnit);
   4683             }
   4684         }
   4685     }
   4686 
   4687     /* Mark the bottom of chaining cells */
   4688     cUnit->chainingCellBottom = (LIR *) newLIR0(cUnit, kMipsChainingCellBottom);
   4689 
   4690     /*
   4691      * Generate the branch to the dvmJitToInterpNoChain entry point at the end
   4692      * of all chaining cells for the overflow cases.
   4693      */
   4694     if (cUnit->switchOverflowPad) {
   4695         loadConstant(cUnit, r_A0, (int) cUnit->switchOverflowPad);
   4696         loadWordDisp(cUnit, rSELF, offsetof(Thread,
   4697                      jitToInterpEntries.dvmJitToInterpNoChain), r_A2);
   4698         opRegReg(cUnit, kOpAdd, r_A1, r_A1);
   4699         opRegRegReg(cUnit, kOpAdd, r4PC, r_A0, r_A1);
   4700 #if defined(WITH_JIT_TUNING)
   4701         loadConstant(cUnit, r_A0, kSwitchOverflow);
   4702 #endif
   4703         opReg(cUnit, kOpBlx, r_A2);
   4704     }
   4705 
   4706     dvmCompilerApplyGlobalOptimizations(cUnit);
   4707 
   4708 #if defined(WITH_SELF_VERIFICATION)
   4709     selfVerificationBranchInsertPass(cUnit);
   4710 #endif
   4711 }
   4712 
   4713 /*
   4714  * Accept the work and start compiling.  Returns true if compilation
   4715  * is attempted.
   4716  */
   4717 bool dvmCompilerDoWork(CompilerWorkOrder *work)
   4718 {
   4719     JitTraceDescription *desc;
   4720     bool isCompile;
   4721     bool success = true;
   4722 
   4723     if (gDvmJit.codeCacheFull) {
   4724         return false;
   4725     }
   4726 
   4727     switch (work->kind) {
   4728         case kWorkOrderTrace:
   4729             isCompile = true;
   4730             /* Start compilation with maximally allowed trace length */
   4731             desc = (JitTraceDescription *)work->info;
   4732             success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
   4733                                         work->bailPtr, 0 /* no hints */);
   4734             break;
   4735         case kWorkOrderTraceDebug: {
   4736             bool oldPrintMe = gDvmJit.printMe;
   4737             gDvmJit.printMe = true;
   4738             isCompile = true;
   4739             /* Start compilation with maximally allowed trace length */
   4740             desc = (JitTraceDescription *)work->info;
   4741             success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
   4742                                         work->bailPtr, 0 /* no hints */);
   4743             gDvmJit.printMe = oldPrintMe;
   4744             break;
   4745         }
   4746         case kWorkOrderProfileMode:
   4747             dvmJitChangeProfileMode((TraceProfilingModes)(int)work->info);
   4748             isCompile = false;
   4749             break;
   4750         default:
   4751             isCompile = false;
   4752             ALOGE("Jit: unknown work order type");
   4753             assert(0);  // Bail if debug build, discard otherwise
   4754     }
   4755     if (!success)
   4756         work->result.codeAddress = NULL;
   4757     return isCompile;
   4758 }
   4759 
   4760 /* Architectural-specific debugging helpers go here */
   4761 void dvmCompilerArchDump(void)
   4762 {
   4763     /* Print compiled opcode in this VM instance */
   4764     int i, start, streak;
   4765     char buf[1024];
   4766 
   4767     streak = i = 0;
   4768     buf[0] = 0;
    4769     while (i < 256 && opcodeCoverage[i] == 0) {
   4770         i++;
   4771     }
   4772     if (i == 256) {
   4773         return;
   4774     }
   4775     for (start = i++, streak = 1; i < 256; i++) {
   4776         if (opcodeCoverage[i]) {
   4777             streak++;
   4778         } else {
   4779             if (streak == 1) {
   4780                 sprintf(buf+strlen(buf), "%x,", start);
   4781             } else {
   4782                 sprintf(buf+strlen(buf), "%x-%x,", start, start + streak - 1);
   4783             }
   4784             streak = 0;
    4785             while (i < 256 && opcodeCoverage[i] == 0) {
   4786                 i++;
   4787             }
   4788             if (i < 256) {
   4789                 streak = 1;
   4790                 start = i;
   4791             }
   4792         }
   4793     }
   4794     if (streak) {
   4795         if (streak == 1) {
   4796             sprintf(buf+strlen(buf), "%x", start);
   4797         } else {
   4798             sprintf(buf+strlen(buf), "%x-%x", start, start + streak - 1);
   4799         }
   4800     }
   4801     if (strlen(buf)) {
   4802         ALOGD("dalvik.vm.jit.op = %s", buf);
   4803     }
   4804 }
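
         /*
          * Example (hypothetical coverage): if opcodes 0x1a through 0x1d and
          * 0xff were the only ones compiled, the streak logic above logs
          * "dalvik.vm.jit.op = 1a-1d,ff".
          */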
   4805 
   4806 /* Common initialization routine for an architecture family */
   4807 bool dvmCompilerArchInit()
   4808 {
   4809     int i;
   4810 
   4811     for (i = 0; i < kMipsLast; i++) {
   4812         if (EncodingMap[i].opcode != i) {
   4813             ALOGE("Encoding order for %s is wrong: expecting %d, seeing %d",
   4814                  EncodingMap[i].name, i, EncodingMap[i].opcode);
   4815             dvmAbort();  // OK to dvmAbort - build error
   4816         }
   4817     }
   4818 
   4819     return dvmCompilerArchVariantInit();
   4820 }
   4821 
   4822 void *dvmCompilerGetInterpretTemplate()
   4823 {
   4824       return (void*) ((int)gDvmJit.codeCache +
   4825                       templateEntryOffsets[TEMPLATE_INTERPRET]);
   4826 }
   4827 
   4828 JitInstructionSetType dvmCompilerGetInterpretTemplateSet()
   4829 {
   4830     return DALVIK_JIT_MIPS;
   4831 }
   4832 
   4833 /* Needed by the Assembler */
   4834 void dvmCompilerSetupResourceMasks(MipsLIR *lir)
   4835 {
   4836     setupResourceMasks(lir);
   4837 }
   4838 
    4839 /* Needed by the ld/st optimizations */
   4840 MipsLIR* dvmCompilerRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
   4841 {
   4842     return genRegCopyNoInsert(cUnit, rDest, rSrc);
   4843 }
   4844 
   4845 /* Needed by the register allocator */
   4846 MipsLIR* dvmCompilerRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
   4847 {
   4848     return genRegCopy(cUnit, rDest, rSrc);
   4849 }
   4850 
   4851 /* Needed by the register allocator */
   4852 void dvmCompilerRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
   4853                             int srcLo, int srcHi)
   4854 {
   4855     genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
   4856 }
   4857 
   4858 void dvmCompilerFlushRegImpl(CompilationUnit *cUnit, int rBase,
   4859                              int displacement, int rSrc, OpSize size)
   4860 {
   4861     storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
   4862 }
   4863 
   4864 void dvmCompilerFlushRegWideImpl(CompilationUnit *cUnit, int rBase,
   4865                                  int displacement, int rSrcLo, int rSrcHi)
   4866 {
   4867     storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
   4868 }
   4869