Home | History | Annotate | Download | only in quick
      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "dex/compiler_ir.h"
     18 #include "dex/compiler_internals.h"
     19 #include "dex/quick/mir_to_lir-inl.h"
     20 #include "entrypoints/quick/quick_entrypoints.h"
     21 #include "mirror/array.h"
     22 #include "verifier/method_verifier.h"
     23 
     24 namespace art {
     25 
     26 /*
     27  * This source files contains "gen" codegen routines that should
     28  * be applicable to most targets.  Only mid-level support utilities
     29  * and "op" calls may be used here.
     30  */
     31 
     32 /*
     33  * Generate an kPseudoBarrier marker to indicate the boundary of special
     34  * blocks.
     35  */
     36 void Mir2Lir::GenBarrier() {
     37   LIR* barrier = NewLIR0(kPseudoBarrier);
     38   /* Mark all resources as being clobbered */
     39   barrier->def_mask = -1;
     40 }
     41 
     42 // FIXME: need to do some work to split out targets with
     43 // condition codes and those without
     44 LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
     45   DCHECK_NE(cu_->instruction_set, kMips);
     46   LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
     47   LIR* branch = OpCondBranch(c_code, tgt);
     48   // Remember branch target - will process later
     49   throw_launchpads_.Insert(tgt);
     50   return branch;
     51 }
     52 
     53 LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
     54   LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
     55   LIR* branch;
     56   if (c_code == kCondAl) {
     57     branch = OpUnconditionalBranch(tgt);
     58   } else {
     59     branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
     60   }
     61   // Remember branch target - will process later
     62   throw_launchpads_.Insert(tgt);
     63   return branch;
     64 }
     65 
     66 /* Perform null-check on a register.  */
     67 LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
     68   if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
     69     opt_flags & MIR_IGNORE_NULL_CHECK) {
     70     return NULL;
     71   }
     72   return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
     73 }
     74 
     75 /* Perform check on two registers */
     76 LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
     77                              ThrowKind kind) {
     78   LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
     79   LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
     80   // Remember branch target - will process later
     81   throw_launchpads_.Insert(tgt);
     82   return branch;
     83 }
     84 
/*
 * Generate code for a two-operand IF_* compare-and-branch: branch to
 * "taken" when the condition holds, otherwise to "fall_through".
 * Emits a compare-immediate form when one operand is a cheap constant
 * still living in the Dalvik frame.
 */
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    // Swapping operands requires mirroring the condition (e.g. lt -> gt).
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
      OpUnconditionalBranch(fall_through);
      return;
    }
  }
  // General case: both operands in core registers, register-register compare.
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
  OpUnconditionalBranch(fall_through);
}
    138 
    139 void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
    140                                       LIR* fall_through) {
    141   ConditionCode cond;
    142   rl_src = LoadValue(rl_src, kCoreReg);
    143   switch (opcode) {
    144     case Instruction::IF_EQZ:
    145       cond = kCondEq;
    146       break;
    147     case Instruction::IF_NEZ:
    148       cond = kCondNe;
    149       break;
    150     case Instruction::IF_LTZ:
    151       cond = kCondLt;
    152       break;
    153     case Instruction::IF_GEZ:
    154       cond = kCondGe;
    155       break;
    156     case Instruction::IF_GTZ:
    157       cond = kCondGt;
    158       break;
    159     case Instruction::IF_LEZ:
    160       cond = kCondLe;
    161       break;
    162     default:
    163       cond = static_cast<ConditionCode>(0);
    164       LOG(FATAL) << "Unexpected opcode " << opcode;
    165   }
    166   OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
    167   OpUnconditionalBranch(fall_through);
    168 }
    169 
    170 void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
    171   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    172   if (rl_src.location == kLocPhysReg) {
    173     OpRegCopy(rl_result.low_reg, rl_src.low_reg);
    174   } else {
    175     LoadValueDirect(rl_src, rl_result.low_reg);
    176   }
    177   OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
    178   StoreValueWide(rl_dest, rl_result);
    179 }
    180 
    181 void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
    182                               RegLocation rl_src) {
    183   rl_src = LoadValue(rl_src, kCoreReg);
    184   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    185   OpKind op = kOpInvalid;
    186   switch (opcode) {
    187     case Instruction::INT_TO_BYTE:
    188       op = kOp2Byte;
    189       break;
    190     case Instruction::INT_TO_SHORT:
    191        op = kOp2Short;
    192        break;
    193     case Instruction::INT_TO_CHAR:
    194        op = kOp2Char;
    195        break;
    196     default:
    197       LOG(ERROR) << "Bad int conversion type";
    198   }
    199   OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
    200   StoreValue(rl_dest, rl_result);
    201 }
    202 
    203 /*
    204  * Let helper function take care of everything.  Will call
    205  * Array::AllocFromCode(type_idx, method, count);
    206  * Note: AllocFromCode will handle checks for errNegativeArraySize.
    207  */
    208 void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
    209                           RegLocation rl_src) {
    210   FlushAllRegs();  /* Everything to home location */
    211   ThreadOffset func_offset(-1);
    212   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
    213                                                        type_idx)) {
    214     func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
    215   } else {
    216     func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
    217   }
    218   CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
    219   RegLocation rl_result = GetReturn(false);
    220   StoreValue(rl_dest, rl_result);
    221 }
    222 
/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  ThreadOffset func_offset(-1);
  // Use the no-access-check entry point when the compiler driver proved
  // the referrer can access this type.
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
  } else {
    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    // Pick a per-target scratch register to hold each copied element.
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        // x86 is register-starved: temporarily give back the return reg
        // so a temp can be allocated; it is restored after the loop below.
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element; scale shift of 2 = 4-byte (int-sized) elements.
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer (kRet0 was freed for reuse above).
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.low_reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.low_reg)) {
        FreeTemp(rl_arg.low_reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}
    332 
/*
 * Generate code for a static field put (SPUT and friends).  Fast path
 * resolves the storage base at compile time and stores directly;
 * otherwise falls back to the pSet*Static runtime helpers.
 */
void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  // Resolve the field at compile time; fast_path is false if that fails.
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, true);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method  = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::ArtMethod::DeclaringClassOffset().Int32Value(), rBase);
      if (IsTemp(rl_method.low_reg)) {
        FreeTemp(rl_method.low_reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      // rBase = method->dex_cache_initialized_static_storage_[ssb_index]
      LoadWordDisp(r_method,
                   mirror::ArtMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase,
                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      LoadConstant(TargetReg(kArg0), ssb_index);
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (is_volatile) {
      // Barrier ahead of the volatile store.
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
                        rl_src.high_reg);
    } else {
      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
    }
    if (is_volatile) {
      // Barrier after the volatile store.
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      // Record the reference store in the GC card table.
      MarkGCCard(rl_src.low_reg, rBase);
    }
    FreeTemp(rBase);
  } else {
    // Slow path: let the runtime resolve and store the field.
    FlushAllRegs();  // Everything to home locations
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
  }
}
    418 
/*
 * Generate code for a static field get (SGET and friends).  Fast path
 * resolves the storage base at compile time and loads directly;
 * otherwise falls back to the pGet*Static runtime helpers.
 */
void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  // Resolve the field at compile time; fast_path is false if that fails.
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, false);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method  = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::ArtMethod::DeclaringClassOffset().Int32Value(), rBase);
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      // rBase = method->dex_cache_initialized_static_storage_[ssb_index]
      LoadWordDisp(r_method,
                   mirror::ArtMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
    if (is_volatile) {
      // Barrier for the volatile load.
      GenMemBarrier(kLoadLoad);
    }
    if (is_long_or_double) {
      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
                       rl_result.high_reg, INVALID_SREG);
    } else {
      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
    }
    FreeTemp(rBase);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Slow path: let the runtime resolve and load the field.
    FlushAllRegs();  // Everything to home locations
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
                          :(is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
                                      : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getterOffset, field_idx, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}
    501 
    502 void Mir2Lir::HandleSuspendLaunchPads() {
    503   int num_elems = suspend_launchpads_.Size();
    504   ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
    505   for (int i = 0; i < num_elems; i++) {
    506     ResetRegPool();
    507     ResetDefTracking();
    508     LIR* lab = suspend_launchpads_.Get(i);
    509     LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
    510     current_dalvik_offset_ = lab->operands[1];
    511     AppendLIR(lab);
    512     int r_tgt = CallHelperSetup(helper_offset);
    513     CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    514     OpUnconditionalBranch(resume_lab);
    515   }
    516 }
    517 
    518 void Mir2Lir::HandleIntrinsicLaunchPads() {
    519   int num_elems = intrinsic_launchpads_.Size();
    520   for (int i = 0; i < num_elems; i++) {
    521     ResetRegPool();
    522     ResetDefTracking();
    523     LIR* lab = intrinsic_launchpads_.Get(i);
    524     CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
    525     current_dalvik_offset_ = info->offset;
    526     AppendLIR(lab);
    527     // NOTE: GenInvoke handles MarkSafepointPC
    528     GenInvoke(info);
    529     LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
    530     if (resume_lab != NULL) {
    531       OpUnconditionalBranch(resume_lab);
    532     }
    533   }
    534 }
    535 
/*
 * Materialize the deferred throw launch pads recorded by GenCheck /
 * GenImmedCheck / GenRegRegCheck: marshal the stashed operands into the
 * target's argument registers and call the matching throw entrypoint.
 */
void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    // operands[1] = dalvik offset of the faulting instruction.
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    ThreadOffset func_offset(-1);
    // v1/v2 were stashed by the RawLIR call that created this pad.
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    bool target_x86 = (cu_->instruction_set == kX86);
    // operands[0] holds the ThrowKind.
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
        break;
      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
        // v1 holds the constant array index.  Mips/Arm uses v2 for length, x86 reloads.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            // v2 is already in kArg0 and v1 is not in kArg1: fill kArg1 first,
            // then overwrite kArg0 with the index.
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
        break;
      case kThrowDivZero:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset =
          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
        break;
      case kThrowStackOverflow:
        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
        // Restore stack alignment
        if (target_x86) {
          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
        } else {
          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCalleeSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
  }
}
    622 
/*
 * Generate code for an instance field get (IGET and friends).  Fast path
 * null-checks the object and loads at the resolved offset; otherwise
 * calls the pGet*Instance runtime helpers.
 */
void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  // Resolve the field at compile time; fast_path is false if that fails.
  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);

  if (fast_path && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        // NOTE(review): second GenNullCheck on this path (see just above) —
        // looks redundant; confirm whether one can be dropped.
        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
                         rl_result.high_reg, rl_obj.s_reg_low);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
      } else {
        // Non-x86: form the field address in a temp, then do the wide load.
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
                   kWord, rl_obj.s_reg_low);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Slow path: let the runtime resolve and load the field.
    ThreadOffset getterOffset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}
    683 
/*
 * Generate code for an instance field put (IPUT and friends).  Fast path
 * null-checks the object and stores at the resolved offset; otherwise
 * calls the pSet*Instance runtime helpers.
 */
void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  // Resolve the field at compile time; fast_path is false if that fails.
  bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
                 true);
  if (fast_path && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      // Form the field address in a temp, then do the wide store.
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
      if (is_volatile) {
        // NOTE(review): kLoadLoad after a volatile store looks unusual
        // (kStoreLoad would be expected) — confirm intended barrier kind.
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
      if (is_volatile) {
        // NOTE(review): same barrier-kind question as the wide path above.
        GenMemBarrier(kLoadLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        // Record the reference store in the GC card table.
        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
      }
    }
  } else {
    // Slow path: let the runtime resolve and store the field.
    ThreadOffset setter_offset =
        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
  }
}
    732 
/*
 * Generate code for CONST_CLASS.  If access checks are required, call a
 * runtime helper that resolves the type and verifies access.  Otherwise
 * load the Class* from the dex cache, and — unless the type is known to
 * be present in the cache — emit a runtime null test that falls back to
 * the resolution helper.
 */
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                   *cu_->dex_file,
                                                   type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, rl_method.low_reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type from dex cache.
    int32_t dex_cache_offset =
        mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
    // Offset of this type's slot within the resolved-types array.
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
                          * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
      // Resolved, store and hop over following code
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      LIR* branch2 = OpUnconditionalBranch(0);
      // TUNING: move slow path to end & remove unconditional branch
      LIR* target1 = NewLIR0(kPseudoTargetLabel);
      // Call out to helper, which will return resolved type in kArg0
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                              rl_method.low_reg, true);
      RegLocation rl_result = GetReturn(false);
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      // Rejoin code paths
      LIR* target2 = NewLIR0(kPseudoTargetLabel);
      branch1->target = target1;
      branch2->target = target2;
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}
    790 
/*
 * Generate code for CONST_STRING.  Fast path: load the String* straight
 * from the dex cache.  Slow path: load the cache entry and, if it is
 * null, call the pResolveString helper — with per-ISA sequences
 * (Thumb2 uses a conditional IT block, MIPS a branch, x86 an
 * unconditional helper call).
 */
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  // Offset of this string's slot within the dex-cache strings array.
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
                 (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers
    LoadCurrMethodDirect(TargetReg(kArg2));
    LoadWordDisp(TargetReg(kArg2),
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
    // Might call out to helper, which will return resolved string in kRet0
    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LoadConstant(TargetReg(kArg1), string_idx);
    if (cu_->instruction_set == kThumb2) {
      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      GenBarrier();
      // For testing, always force through helper
      if (!EXERCISE_SLOWEST_STRING_PATH) {
        OpIT(kCondEq, "T");
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);    // .eq, helper(Method*, string_idx)
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
    } else if (cu_->instruction_set == kMips) {
      // Branch over the helper call when the string is already resolved.
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));   // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2),
                              TargetReg(kArg1), true);
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    // Fast path: the string is known to be in the dex cache.
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}
    843 
    844 /*
    845  * Let helper function take care of everything.  Will
    846  * call Class::NewInstanceFromCode(type_idx, method);
    847  */
    848 void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
    849   FlushAllRegs();  /* Everything to home location */
    850   // alloc will always check for resolution, do we also need to verify
    851   // access because the verifier was unable to?
    852   ThreadOffset func_offset(-1);
    853   if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
    854       cu_->method_idx, *cu_->dex_file, type_idx)) {
    855     func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
    856   } else {
    857     func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
    858   }
    859   CallRuntimeHelperImmMethod(func_offset, type_idx, true);
    860   RegLocation rl_result = GetReturn(false);
    861   StoreValue(rl_dest, rl_result);
    862 }
    863 
/*
 * Generate code for THROW: flush registers to their home locations,
 * then deliver the exception object via the runtime helper.
 */
void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
    868 
// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.  Result is 0 for a null ref or class
// mismatch, 1 for an exact class match.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  // Don't clobber the object ref while we still need it - use a temp.
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);     // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    // The target class is the current method's declaring class.
    LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    // Load the target class out of the method's dex cache.
    LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg,  mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
      mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
      (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    // Thumb2 can if-convert the load-true with an IT block.
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");   // if-convert the test
    LoadConstant(result_reg, 1);     // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);     // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  // If we used a scratch result register, copy it into the real one.
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}
    922 
/*
 * General INSTANCE_OF path: loads the target Class* (via access-check
 * helper, declaring class, or dex cache with optional runtime
 * resolution), then compares ref->klass_ against it — falling through
 * to pInstanceofNonTrivial when an exact match can't decide the answer.
 * Uses explicit call registers throughout (kArg0=ref, kArg1=Method*/
 * klass, kArg2=target class).
 */
void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0),  mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // Final class: an exact klass comparison fully answers the question.
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");   // if-convert the test
      LoadConstant(rl_result.low_reg, 1);     // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);     // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);     // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);     // eq case - load true
    }
  } else {
    // Non-final: a klass mismatch still needs the runtime helper.
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");   // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);     // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);     // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      if (cu_->instruction_set != kX86) {
        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
        OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
        FreeTemp(r_tgt);
      } else {
        // x86 calls through a thread-relative entrypoint slot.
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
      }
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCalleeSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}
   1030 
   1031 void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
   1032   bool type_known_final, type_known_abstract, use_declaring_class;
   1033   bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
   1034                                                                               *cu_->dex_file,
   1035                                                                               type_idx,
   1036                                                                               &type_known_final,
   1037                                                                               &type_known_abstract,
   1038                                                                               &use_declaring_class);
   1039   bool can_assume_type_is_in_dex_cache = !needs_access_check &&
   1040       cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
   1041 
   1042   if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
   1043     GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
   1044   } else {
   1045     GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
   1046                                use_declaring_class, can_assume_type_is_in_dex_cache,
   1047                                type_idx, rl_dest, rl_src);
   1048   }
   1049 }
   1050 
/*
 * Generate code for CHECK_CAST.  Elides the check entirely when the
 * verifier proved the cast safe.  Otherwise loads the target Class*
 * (helper / declaring class / dex cache with optional runtime
 * resolution), accepts null refs and exact klass matches inline, and
 * calls pCheckCast for everything else (which throws on failure).
 */
void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kArg0
      // InitializeTypeFromCode(idx, method)
      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                              TargetReg(kArg1), true);
      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  /* Null is OK - continue */
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg1 now contains object->klass_ */
  LIR* branch2 = NULL;
  if (!type_known_abstract) {
    // Exact klass match - no need to call the runtime checker.
    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
  }
  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg1),
                          TargetReg(kArg2), true);
  /* branch target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch1->target = target;
  if (branch2 != NULL) {
    branch2->target = target;
  }
}
   1125 
/*
 * Generate a 64-bit operation built from two 32-bit ops (first_op on
 * the low words, second_op on the high words), e.g. add/adc.  Handles
 * low/high register overlap between operands and result with an
 * intermediate temp, and on Thumb2 temporarily presses lr into the
 * temp pool to survive the register pressure.
 */
void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE:  This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are 5 in the normal
     * set for Arm.  Until we have spill capabilities, temporarily add
     * lr to the temp set.  It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));   // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}
   1171 
   1172 
   1173 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
   1174                              RegLocation rl_src1, RegLocation rl_shift) {
   1175   ThreadOffset func_offset(-1);
   1176 
   1177   switch (opcode) {
   1178     case Instruction::SHL_LONG:
   1179     case Instruction::SHL_LONG_2ADDR:
   1180       func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
   1181       break;
   1182     case Instruction::SHR_LONG:
   1183     case Instruction::SHR_LONG_2ADDR:
   1184       func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
   1185       break;
   1186     case Instruction::USHR_LONG:
   1187     case Instruction::USHR_LONG_2ADDR:
   1188       func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
   1189       break;
   1190     default:
   1191       LOG(FATAL) << "Unexpected case";
   1192   }
   1193   FlushAllRegs();   /* Send everything to home location */
   1194   CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
   1195   RegLocation rl_result = GetReturnWide(false);
   1196   StoreValueWide(rl_dest, rl_result);
   1197 }
   1198 
   1199 
/*
 * Generate code for a 32-bit integer arithmetic/logic bytecode.  The
 * switch classifies the opcode (unary / shift / div-rem / plain binary);
 * non-div/rem ops are emitted inline, while div/rem either uses the
 * MIPS inline divider or calls the pIdivmod helper with an explicit
 * divide-by-zero check.
 */
void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        if (cu_->instruction_set == kX86) {
          // X86 doesn't require masking and must use ECX
          t_reg = TargetReg(kCount);  // rCX
          LoadValueDirectFixed(rl_src2, t_reg);
        } else {
          // Dalvik shift semantics: only the low 5 bits of the count matter.
          rl_src2 = LoadValue(rl_src2, kCoreReg);
          t_reg = AllocTemp();
          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
        }
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    if (cu_->instruction_set == kMips) {
      // MIPS has a divide instruction - emit the div/rem inline.
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
          GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
    } else {
      // Other targets call the pIdivmod runtime helper.
      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();   /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      // Quotient comes back in the normal return reg, remainder in the alt.
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}
   1328 
   1329 /*
   1330  * The following are the first-level codegen routines that analyze the format
   1331  * of each bytecode then either dispatch special purpose codegen routines
   1332  * or produce corresponding Thumb instructions directly.
   1333  */
   1334 
   1335 static bool IsPowerOfTwo(int x) {
   1336   return (x & (x - 1)) == 0;
   1337 }
   1338 
   1339 // Returns true if no more than two bits are set in 'x'.
   1340 static bool IsPopCountLE2(unsigned int x) {
   1341   x &= x - 1;
   1342   return (x & (x - 1)) == 0;
   1343 }
   1344 
   1345 // Returns the index of the lowest set bit in 'x'.
   1346 static int LowestSetBit(unsigned int x) {
   1347   int bit_posn = 0;
   1348   while ((x & 0xf) == 0) {
   1349     bit_posn += 4;
   1350     x >>= 4;
   1351   }
   1352   while ((x & 1) == 0) {
   1353     bit_posn++;
   1354     x >>= 1;
   1355   }
   1356   return bit_posn;
   1357 }
   1358 
// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.  Only "easy" divisors are handled:
// powers of two on all targets, plus small non-power-of-two literals on
// Thumb2 (via SmallLiteralDivRem).  Returns false to fall back on the
// generic (runtime-call) path.
bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Reject lit < 2 everywhere; on non-Thumb2 also reject non-powers-of-two.
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);  // lit == 2^k at this point.
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Round-toward-zero division by 2^k: add a bias of (lit - 1) for
    // negative dividends, then arithmetic-shift right by k.
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      // Bias is simply the sign bit: src >>> 31.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      // General bias: (src >> 31) >>> (32 - k), i.e. (lit - 1) when src
      // is negative and 0 otherwise.
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
    // Remainder: rem = ((src + bias) & (lit - 1)) - bias, using the same
    // sign-correcting bias as the division path above.
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit -1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
   1409 
   1410 // Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
   1411 // and store the result in 'rl_dest'.
   1412 bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
   1413   // Can we simplify this multiplication?
   1414   bool power_of_two = false;
   1415   bool pop_count_le2 = false;
   1416   bool power_of_two_minus_one = false;
   1417   if (lit < 2) {
   1418     // Avoid special cases.
   1419     return false;
   1420   } else if (IsPowerOfTwo(lit)) {
   1421     power_of_two = true;
   1422   } else if (IsPopCountLE2(lit)) {
   1423     pop_count_le2 = true;
   1424   } else if (IsPowerOfTwo(lit + 1)) {
   1425     power_of_two_minus_one = true;
   1426   } else {
   1427     return false;
   1428   }
   1429   rl_src = LoadValue(rl_src, kCoreReg);
   1430   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   1431   if (power_of_two) {
   1432     // Shift.
   1433     OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
   1434   } else if (pop_count_le2) {
   1435     // Shift and add and shift.
   1436     int first_bit = LowestSetBit(lit);
   1437     int second_bit = LowestSetBit(lit ^ (1 << first_bit));
   1438     GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
   1439   } else {
   1440     // Reverse subtract: (src << (shift + 1)) - src.
   1441     DCHECK(power_of_two_minus_one);
   1442     // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
   1443     int t_reg = AllocTemp();
   1444     OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
   1445     OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
   1446   }
   1447   StoreValue(rl_dest, rl_result);
   1448   return true;
   1449 }
   1450 
   1451 void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
   1452                                int lit) {
   1453   RegLocation rl_result;
   1454   OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
   1455   int shift_op = false;
   1456   bool is_div = false;
   1457 
   1458   switch (opcode) {
   1459     case Instruction::RSUB_INT_LIT8:
   1460     case Instruction::RSUB_INT: {
   1461       rl_src = LoadValue(rl_src, kCoreReg);
   1462       rl_result = EvalLoc(rl_dest, kCoreReg, true);
   1463       if (cu_->instruction_set == kThumb2) {
   1464         OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
   1465       } else {
   1466         OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
   1467         OpRegImm(kOpAdd, rl_result.low_reg, lit);
   1468       }
   1469       StoreValue(rl_dest, rl_result);
   1470       return;
   1471     }
   1472 
   1473     case Instruction::SUB_INT:
   1474     case Instruction::SUB_INT_2ADDR:
   1475       lit = -lit;
   1476       // Intended fallthrough
   1477     case Instruction::ADD_INT:
   1478     case Instruction::ADD_INT_2ADDR:
   1479     case Instruction::ADD_INT_LIT8:
   1480     case Instruction::ADD_INT_LIT16:
   1481       op = kOpAdd;
   1482       break;
   1483     case Instruction::MUL_INT:
   1484     case Instruction::MUL_INT_2ADDR:
   1485     case Instruction::MUL_INT_LIT8:
   1486     case Instruction::MUL_INT_LIT16: {
   1487       if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
   1488         return;
   1489       }
   1490       op = kOpMul;
   1491       break;
   1492     }
   1493     case Instruction::AND_INT:
   1494     case Instruction::AND_INT_2ADDR:
   1495     case Instruction::AND_INT_LIT8:
   1496     case Instruction::AND_INT_LIT16:
   1497       op = kOpAnd;
   1498       break;
   1499     case Instruction::OR_INT:
   1500     case Instruction::OR_INT_2ADDR:
   1501     case Instruction::OR_INT_LIT8:
   1502     case Instruction::OR_INT_LIT16:
   1503       op = kOpOr;
   1504       break;
   1505     case Instruction::XOR_INT:
   1506     case Instruction::XOR_INT_2ADDR:
   1507     case Instruction::XOR_INT_LIT8:
   1508     case Instruction::XOR_INT_LIT16:
   1509       op = kOpXor;
   1510       break;
   1511     case Instruction::SHL_INT_LIT8:
   1512     case Instruction::SHL_INT:
   1513     case Instruction::SHL_INT_2ADDR:
   1514       lit &= 31;
   1515       shift_op = true;
   1516       op = kOpLsl;
   1517       break;
   1518     case Instruction::SHR_INT_LIT8:
   1519     case Instruction::SHR_INT:
   1520     case Instruction::SHR_INT_2ADDR:
   1521       lit &= 31;
   1522       shift_op = true;
   1523       op = kOpAsr;
   1524       break;
   1525     case Instruction::USHR_INT_LIT8:
   1526     case Instruction::USHR_INT:
   1527     case Instruction::USHR_INT_2ADDR:
   1528       lit &= 31;
   1529       shift_op = true;
   1530       op = kOpLsr;
   1531       break;
   1532 
   1533     case Instruction::DIV_INT:
   1534     case Instruction::DIV_INT_2ADDR:
   1535     case Instruction::DIV_INT_LIT8:
   1536     case Instruction::DIV_INT_LIT16:
   1537     case Instruction::REM_INT:
   1538     case Instruction::REM_INT_2ADDR:
   1539     case Instruction::REM_INT_LIT8:
   1540     case Instruction::REM_INT_LIT16: {
   1541       if (lit == 0) {
   1542         GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
   1543         return;
   1544       }
   1545       if ((opcode == Instruction::DIV_INT) ||
   1546           (opcode == Instruction::DIV_INT_2ADDR) ||
   1547           (opcode == Instruction::DIV_INT_LIT8) ||
   1548           (opcode == Instruction::DIV_INT_LIT16)) {
   1549         is_div = true;
   1550       } else {
   1551         is_div = false;
   1552       }
   1553       if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
   1554         return;
   1555       }
   1556       if (cu_->instruction_set == kMips) {
   1557         rl_src = LoadValue(rl_src, kCoreReg);
   1558         rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
   1559       } else {
   1560         FlushAllRegs();   /* Everything to home location */
   1561         LoadValueDirectFixed(rl_src, TargetReg(kArg0));
   1562         Clobber(TargetReg(kArg0));
   1563         ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
   1564         CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
   1565         if (is_div)
   1566           rl_result = GetReturn(false);
   1567         else
   1568           rl_result = GetReturnAlt();
   1569       }
   1570       StoreValue(rl_dest, rl_result);
   1571       return;
   1572     }
   1573     default:
   1574       LOG(FATAL) << "Unexpected opcode " << opcode;
   1575   }
   1576   rl_src = LoadValue(rl_src, kCoreReg);
   1577   rl_result = EvalLoc(rl_dest, kCoreReg, true);
   1578   // Avoid shifts by literal 0 - no support in Thumb.  Change to copy
   1579   if (shift_op && (lit == 0)) {
   1580     OpRegCopy(rl_result.low_reg, rl_src.low_reg);
   1581   } else {
   1582     OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
   1583   }
   1584   StoreValue(rl_dest, rl_result);
   1585 }
   1586 
/*
 * Generate code for a 64-bit arithmetic bytecode.  Simple cases expand
 * inline as a pair of 32-bit ops (first_op on the low words, second_op on
 * the high words); others dispatch to target-specific generators or call
 * out-of-line runtime helper routines.
 */
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;   // Op applied to the low halves.
  OpKind second_op = kOpBkpt;  // Op applied to the high halves.
  bool call_out = false;       // Set when a runtime helper must be called.
  bool check_zero = false;     // Set when the divisor needs a zero check.
  ThreadOffset func_offset(-1);
  int ret_reg = TargetReg(kRet0);  // Where the helper's result lands.

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.low_reg == rl_src2.high_reg) {
        // Writing the result's low word would clobber the source's high
        // word; preserve it in a temp first.
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;  // High-word add consumes the low-word carry.
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;  // High-word subtract consumes the borrow.
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set == kThumb2) {
        GenMulLong(rl_dest, rl_src1, rl_src2);
        return;
      } else {
        // Other targets call the pLmul runtime helper.
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    // Inline expansion: one op for the low words, one for the high words.
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();   /* Send everything to home location */
    if (check_zero) {
      // Load the divisor first so the zero check sees it in its final
      // argument registers before the dividend is materialized.
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs in to handle case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}
   1713 
   1714 void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
   1715                                 RegLocation rl_dest, RegLocation rl_src) {
   1716   /*
   1717    * Don't optimize the register usage since it calls out to support
   1718    * functions
   1719    */
   1720   FlushAllRegs();   /* Send everything to home location */
   1721   if (rl_src.wide) {
   1722     LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
   1723                              rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
   1724   } else {
   1725     LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
   1726   }
   1727   CallRuntimeHelperRegLocation(func_offset, rl_src, false);
   1728   if (rl_dest.wide) {
   1729     RegLocation rl_result;
   1730     rl_result = GetReturnWide(rl_dest.fp);
   1731     StoreValueWide(rl_dest, rl_result);
   1732   } else {
   1733     RegLocation rl_result;
   1734     rl_result = GetReturn(rl_dest.fp);
   1735     StoreValue(rl_dest, rl_result);
   1736   }
   1737 }
   1738 
   1739 /* Check if we need to check for pending suspend request */
   1740 void Mir2Lir::GenSuspendTest(int opt_flags) {
   1741   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
   1742     return;
   1743   }
   1744   FlushAllRegs();
   1745   LIR* branch = OpTestSuspend(NULL);
   1746   LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
   1747   LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
   1748                        reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
   1749   branch->target = target;
   1750   suspend_launchpads_.Insert(target);
   1751 }
   1752 
   1753 /* Check if we need to check for pending suspend request */
   1754 void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
   1755   if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
   1756     OpUnconditionalBranch(target);
   1757     return;
   1758   }
   1759   OpTestSuspend(target);
   1760   LIR* launch_pad =
   1761       RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
   1762              reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
   1763   FlushAllRegs();
   1764   OpUnconditionalBranch(launch_pad);
   1765   suspend_launchpads_.Insert(launch_pad);
   1766 }
   1767 
   1768 }  // namespace art
   1769