/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "x86/codegen_x86.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper.  Because x86
 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
 * load arguments between the two parts.
 */
int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
}
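
/*
 * Illustrative sketch (not part of the original file): on a register target
 * like ARM the two parts expand roughly to
 *
 *     ldr   rTGT, [rSELF, #helper_offset]   @ part 1: CallHelperSetup()
 *     ... marshal arguments into kArg0..kArg3 ...
 *     blx   rTGT                            @ part 2: CallHelper()
 *
 * where rTGT stands for whatever temp LoadHelper() returns, while x86 folds
 * both parts into a single memory-indirect call through the thread segment
 * (OpThreadMem emits roughly "call fs:[helper_offset]"), which is why
 * CallHelperSetup() returns 0 there.
 */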

/* NOTE: if r_tgt is a temp, it will be freed following use */
LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
  LIR* call_inst;
  if (cu_->instruction_set == kX86) {
    call_inst = OpThreadMem(kOpBlx, helper_offset);
  } else {
    call_inst = OpReg(kOpBlx, r_tgt);
    FreeTemp(r_tgt);
  }
  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}

void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                           bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
                                              bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0)
    return;
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument.  It is also possible that long and double arguments
   * end up half-promoted.  In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is it only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
          /*
           * On ARM, a double is represented as a pair of consecutive single float
           * registers starting at an even number.  It's possible that both Dalvik vRegs
           * representing the incoming double were independently promoted as singles - but
           * not in a form usable as a double.  If so, we need to flush - even though the
           * incoming arg appears fully in register.  At this point in the code, both
           * halves of the double are promoted.  Make sure they are in a usable form.
           */
          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
          int low_reg = promotion_map_[lowreg_index].FpReg;
          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
            need_flush = true;
          }
        }
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}
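
/*
 * Worked example (assumed scenario, not from the original source): a long
 * incoming in Dalvik vRegs v4/v5 where only v4 was promoted to a core
 * register is "half-promoted".  The promotion maps of the two halves then
 * disagree, need_flush becomes true, and the arriving register half is
 * written back to its frame slot so the pair is whole in memory.
 */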

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_code = 0;
    direct_method = 0;
  }
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      if (direct_code != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
      } else {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      if (direct_method != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      break;
    default:
      return -1;
    }
  } else {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadWordDisp(cg->TargetReg(kArg0),
        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
      // Set up direct code if known.
      if (direct_code != 0) {
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
      }
      break;
    case 2:  // Grab target method*
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                           (target_method.dex_method_index * 4),
                       cg->TargetReg(kArg0));
      break;
    case 3:  // Grab the code from the method*
      if (cu->instruction_set != kX86) {
        if (direct_code == 0) {
          cg->LoadWordDisp(cg->TargetReg(kArg0),
                           mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      }
      // Intentional fallthrough for x86
    default:
      return -1;
    }
  }
  return state + 1;
}
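
/*
 * Minimal sketch (assumed driver shape, not from this file) of how the
 * NextCallInsn state machines are advanced, interleaved with argument
 * loads to fill the load delay of each step:
 *
 *   int state = 0;
 *   while (more work to do) {
 *     // ...emit one argument load/copy here...
 *     state = next_call_insn(cu, info, state, target_method, vtable_idx,
 *                            direct_code, direct_method, type);
 *   }
 *
 * Each call returns state + 1 until the sequence is exhausted, at which
 * point -1 signals that kArg0 (target Method*) and kInvokeTgt (code
 * address) are fully materialized.
 */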

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than in the standard LoadArgRegs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [sets kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
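
/*
 * In pointer terms, states 1-4 above compute (sketch, assuming the 32-bit
 * object layout implied by the offsets used):
 *
 *   kInvokeTgt = this->klass_;               // state 1
 *   kInvokeTgt = kInvokeTgt->vtable_;        // state 2
 *   kArg0      = kInvokeTgt[method_idx];     // state 3 (4-byte slots)
 *   kInvokeTgt = kArg0->entry_point_;        // state 4 (non-x86)
 */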

/*
 * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t direct_method, InvokeType unused4) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_method = 0;
  }
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);

  if (direct_method != 0) {
    switch (state) {
      case 0:  // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        // Get the interface Method* [sets kArg0]
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->method_literal_list_,
                                          target_method.dex_method_index);
            data_target->operands[1] = kInterface;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        break;
      default:
        return -1;
    }
  } else {
    switch (state) {
      case 0:
        // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                         cg->TargetReg(kArg0));
        break;
      case 2:  // Grab target method* [set/use kArg0]
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                             (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}
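
/*
 * The wrappers below differ only in which access-checking trampoline they
 * select: each one feeds the unresolved dex method index to the runtime in
 * kArg0 via NextInvokeInsnSP and lets the chosen trampoline finish
 * resolution and dispatch.
 */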

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t method_idx,
                                uintptr_t unused, uintptr_t unused2,
                                InvokeType unused3) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t method_idx, uintptr_t unused,
                                uintptr_t unused2, InvokeType unused3) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t method_idx, uintptr_t unused,
                               uintptr_t unused2, InvokeType unused3) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t method_idx, uintptr_t unused,
                           uintptr_t unused2, InvokeType unused3) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t unused,
                                                uintptr_t unused2, uintptr_t unused3,
                                                InvokeType unused4) {
  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
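
/*
 * Worked example (assumed scenario, not from the original source): for a
 * virtual call with skip_this == true and signature (int a, long b), word 0
 * ("this") is handled by the invoke state machine, a lands in kArg2, and
 * only the low half of b still fits in registers: the loop narrows b and
 * loads its low word into kArg3, relying on the caller to have flushed the
 * high word to the outs area already.
 */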

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3.  On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                  const MethodReference& target_method,
                                  uint32_t vtable_idx, uintptr_t direct_code,
                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
  RegLocation rl_arg;

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);

  DCHECK_LE(info->num_arg_words, 5);
  if (info->num_arg_words > 3) {
    int32_t next_use = 3;
    // Detect special case of wide arg spanning arg3/arg4
    RegLocation rl_use0 = info->args[0];
    RegLocation rl_use1 = info->args[1];
    RegLocation rl_use2 = info->args[2];
    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
      rl_use2.wide) {
      int reg = -1;
      // Wide spans, we need the 2nd half of uses[2].
      rl_arg = UpdateLocWide(rl_use2);
      if (rl_arg.location == kLocPhysReg) {
        reg = rl_arg.high_reg;
      } else {
        // kArg2 & kArg3 can safely be used here
        reg = TargetReg(kArg3);
        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);  // next_use == 3, so offset 16.
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      next_use++;
    }
    // Loop through the rest
    while (next_use < info->num_arg_words) {
      int low_reg;
      int high_reg = -1;
      rl_arg = info->args[next_use];
      rl_arg = UpdateRawLoc(rl_arg);
      if (rl_arg.location == kLocPhysReg) {
        low_reg = rl_arg.low_reg;
        high_reg = rl_arg.high_reg;
      } else {
        low_reg = TargetReg(kArg2);
        if (rl_arg.wide) {
          high_reg = TargetReg(kArg3);
          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
        } else {
          LoadValueDirectFixed(rl_arg, low_reg);
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      int outs_offset = (next_use + 1) * 4;
      if (rl_arg.wide) {
        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
        next_use += 2;
      } else {
        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
        next_use++;
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments.  Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int outs_offset = 4 /* Method* */ + (3 * 4);
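  /*
   * Sketch of the outgoing argument area these offsets assume (32-bit,
   * sp-relative words):
   *
   *   sp + 0  : Method*     (callee's method slot)
   *   sp + 4  : out word 0  -> also passed in kArg1
   *   sp + 8  : out word 1  -> also passed in kArg2
   *   sp + 12 : out word 2  -> also passed in kArg3
   *   sp + 16 : out word 3  (first memory-only word; the copy starts here)
   */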
  if (cu_->instruction_set != kThumb2) {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  } else {
    if (info->num_arg_words >= 20) {
      // Generate memcpy
      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
    } else {
      // Use vldm/vstm pair using kArg3 as a temp
      int regs_left = std::min(info->num_arg_words - 3, 16);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
      // TUNING: loosen barrier
      ld->def_mask = ENCODE_ALL;
      SetMemRefType(ld, true /* is_load */, kDalvikReg);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
      SetMemRefType(st, false /* is_load */, kDalvikReg);
      st->def_mask = ENCODE_ALL;
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturn(false);
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturnWide(false);
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
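  /*
   * With these offsets the expansion below computes, in effect (sketch,
   * using the uncompressed 16-bit char array layout of this vintage):
   *
   *   result = ((uint16_t*)(str->value_ + data_offset))[str->offset_ + idx];
   *
   * The LoadBaseIndexed() at the end uses scale 1, i.e. (offset_ + idx) is
   * shifted left by one to index 2-byte chars.
   */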

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCs, launch_pad);
    }
  } else {
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCc, launch_pad);
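      // The condition differs from the ARM path's kCondCs above, yet both
      // branch when idx >= max: after a compare, ARM sets carry when there
      // is no borrow while x86 sets it when there is one.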
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
  }
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
  FreeTemp(rl_obj.low_reg);
  FreeTemp(rl_idx.low_reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}

// Generates an inlined String.isEmpty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      OpRegImm(kOpSub, rl_result.low_reg, 1);
      OpRegImm(kOpLsr, rl_result.low_reg, 31);
    }
  }
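  // Sketch of the two "len == 0" idioms above (assuming the Thumb2 negate
  // is the flag-setting rsbs): rsbs t, len, #0 produces carry exactly when
  // len == 0, so the adc computes len + (-len) + carry == (len == 0).  The
  // x86 path computes (len - 1) >> 31 with a logical shift, which is 1 only
  // when len was 0.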
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // abs(x) = (x + y) ^ y, where y = x >> 31 (arithmetic shift).
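  // Worked check (not from the original source): x = -5 gives y = -1,
  // x + y = -6, and (-6) ^ -1 = 5; for x >= 0, y = 0 and both ops are
  // no-ops.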
  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // abs(x) = (x + y) ^ y, where y is x's sign word (high word >> 31) used for both halves.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
    FreeTemp(rl_src.low_reg);
    FreeTemp(rl_src.high_reg);
    int sign_reg = AllocTemp();
    // abs(x) = (x + y) ^ y, where y is x's sign word (high word >> 31) used for both halves.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast String.indexOf(I) & (II).  Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    LoadValueDirectFixed(rl_start, reg_start);
  }
  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
  }
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
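
/*
 * Note on the launch pad mechanism (sketch): operands[2] of the
 * kPseudoIntrinsicRetry LIR is its resume target.  GenInlinedIndexOf points
 * it at the label following the call so the out-of-line slow path can redo
 * the operation and branch back, while intrinsics that store 0 there (as in
 * GenInlinedCharAt and GenInlinedStringCompareTo) never resume inline.
 */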

/* Fast String.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  ThreadOffset offset = Thread::PeerOffset();
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
  } else {
    CHECK(cu_->instruction_set == kX86);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
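
/*
 * Thread.currentThread() thus reduces to a single load with no call:
 * Thread::PeerOffset() is the slot holding the java.lang.Thread peer object,
 * read off the self-pointer register on ARM/MIPS and through thread-local
 * memory (OpRegThreadMem) on x86.
 */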

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kX86 && is_object) {
    // TODO: fix X86, it exhausts registers for card marking.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}
   1207 
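         /*
          * Intrinsic recognition is string-driven: the declaring class
          * descriptor is checked first with a cheap prefix test, and only on
          * a match is the comparatively expensive PrettyMethod() signature
          * string (e.g. "int java.lang.Math.abs(int)") built and compared.
          */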
   1208 bool Mir2Lir::GenIntrinsic(CallInfo* info) {
   1209   if (info->opt_flags & MIR_INLINED) {
   1210     return false;
   1211   }
   1212   /*
   1213    * TODO: move these to a target-specific structured constant array
   1214    * and use a generic match function.  The list of intrinsics may be
   1215    * slightly different depending on target.
   1216    * TODO: Fold this into a matching function that runs during
   1217    * basic block building.  This should be part of the action for
   1218    * small method inlining and recognition of the special object init
   1219    * method.  By doing this during basic block construction, we can also
   1220    * take advantage of/generate new useful dataflow info.
   1221    */
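           // A hypothetical table-driven shape for the TODO above (a sketch
           // only; the names below are illustrative, not part of this source):
           //
           //   struct IntrinsicDef {
           //     const char* declaring_class;   // e.g. "Ljava/lang/Math;"
           //     const char* pretty_signature;  // e.g. "int java.lang.Math.abs(int)"
           //     bool (Mir2Lir::*gen)(CallInfo*);
           //   };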
   1222   StringPiece tgt_methods_declaring_class(
   1223       cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
   1224   if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
   1225     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   1226     if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
   1227       return GenInlinedDoubleCvt(info);
   1228     }
   1229     if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
   1230       return GenInlinedDoubleCvt(info);
   1231     }
   1232   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
   1233     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    1234     if (tgt_method == "int java.lang.Float.floatToRawIntBits(float)") {
   1235       return GenInlinedFloatCvt(info);
   1236     }
   1237     if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
   1238       return GenInlinedFloatCvt(info);
   1239     }
   1240   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
   1241              tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
   1242     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   1243     if (tgt_method == "int java.lang.Math.abs(int)" ||
   1244         tgt_method == "int java.lang.StrictMath.abs(int)") {
   1245       return GenInlinedAbsInt(info);
   1246     }
   1247     if (tgt_method == "long java.lang.Math.abs(long)" ||
   1248         tgt_method == "long java.lang.StrictMath.abs(long)") {
   1249       return GenInlinedAbsLong(info);
   1250     }
   1251     if (tgt_method == "int java.lang.Math.max(int, int)" ||
   1252         tgt_method == "int java.lang.StrictMath.max(int, int)") {
   1253       return GenInlinedMinMaxInt(info, false /* is_min */);
   1254     }
   1255     if (tgt_method == "int java.lang.Math.min(int, int)" ||
   1256         tgt_method == "int java.lang.StrictMath.min(int, int)") {
   1257       return GenInlinedMinMaxInt(info, true /* is_min */);
   1258     }
   1259     if (tgt_method == "double java.lang.Math.sqrt(double)" ||
   1260         tgt_method == "double java.lang.StrictMath.sqrt(double)") {
   1261       return GenInlinedSqrt(info);
   1262     }
   1263   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
   1264     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   1265     if (tgt_method == "char java.lang.String.charAt(int)") {
   1266       return GenInlinedCharAt(info);
   1267     }
   1268     if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
   1269       return GenInlinedStringCompareTo(info);
   1270     }
    1271     if (tgt_method == "boolean java.lang.String.isEmpty()") {
   1272       return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
   1273     }
    1274     if (tgt_method == "int java.lang.String.indexOf(int, int)") {
   1275       return GenInlinedIndexOf(info, false /* base 0 */);
   1276     }
    1277     if (tgt_method == "int java.lang.String.indexOf(int)") {
   1278       return GenInlinedIndexOf(info, true /* base 0 */);
   1279     }
   1280     if (tgt_method == "int java.lang.String.length()") {
   1281       return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
   1282     }
   1283   } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
   1284     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   1285     if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
   1286       return GenInlinedCurrentThread(info);
   1287     }
   1288   } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
   1289     std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
   1290     if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
   1291       return GenInlinedCas32(info, false);
   1292     }
   1293     if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
   1294       return GenInlinedCas32(info, true);
   1295     }
   1296     if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
   1297       return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
   1298     }
   1299     if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
   1300       return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
   1301     }
   1302     if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
   1303       return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
   1304                                  false /* is_volatile */, false /* is_ordered */);
   1305     }
   1306     if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
   1307       return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
   1308                                  true /* is_volatile */, false /* is_ordered */);
   1309     }
   1310     if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
   1311       return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
   1312                                  false /* is_volatile */, true /* is_ordered */);
   1313     }
   1314     if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
   1315       return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
   1316     }
   1317     if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
   1318       return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
   1319     }
   1320     if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
   1321       return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
   1322                                  false /* is_volatile */, false /* is_ordered */);
   1323     }
   1324     if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
   1325       return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
   1326                                  true /* is_volatile */, false /* is_ordered */);
   1327     }
   1328     if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
   1329       return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
   1330                                  false /* is_volatile */, true /* is_ordered */);
   1331     }
   1332     if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
   1333       return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
   1334     }
   1335     if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
   1336       return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
   1337     }
   1338     if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
   1339       return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
   1340                                  false /* is_volatile */, false /* is_ordered */);
   1341     }
   1342     if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
   1343       return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
   1344                                  true /* is_volatile */, false /* is_ordered */);
   1345     }
   1346     if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
   1347       return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
   1348                                  false /* is_volatile */, true /* is_ordered */);
   1349     }
   1350   }
   1351   return false;
   1352 }
   1353 
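         /*
          * Invoke code generation is a small state machine: the argument
          * loaders interleave their moves with calls to next_call_insn, which
          * advances call_state one step at a time (load the callee Method*,
          * fetch the vtable entry, resolve direct code, ...) and goes negative
          * once the target is fully materialized; any steps not covered during
          * argument loading are drained by the loop before the call itself.
          */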
   1354 void Mir2Lir::GenInvoke(CallInfo* info) {
   1355   if (GenIntrinsic(info)) {
   1356     return;
   1357   }
   1358   InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
   1359   int call_state = 0;
   1360   LIR* null_ck;
   1361   LIR** p_null_ck = NULL;
   1362   NextCallInsn next_call_insn;
   1363   FlushAllRegs();  /* Everything to home location */
   1364   // Explicit register usage
   1365   LockCallTemps();
   1366 
   1367   DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
   1368   MethodReference target_method(cUnit->GetDexFile(), info->index);
   1369   int vtable_idx;
   1370   uintptr_t direct_code;
   1371   uintptr_t direct_method;
   1372   bool skip_this;
   1373   bool fast_path =
   1374       cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
   1375                                               current_dalvik_offset_,
   1376                                               info->type, target_method,
   1377                                               vtable_idx,
   1378                                               direct_code, direct_method,
   1379                                               true) && !SLOW_INVOKE_PATH;
   1380   if (info->type == kInterface) {
   1381     if (fast_path) {
   1382       p_null_ck = &null_ck;
   1383     }
   1384     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
   1385     skip_this = false;
   1386   } else if (info->type == kDirect) {
   1387     if (fast_path) {
   1388       p_null_ck = &null_ck;
   1389     }
   1390     next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
   1391     skip_this = false;
   1392   } else if (info->type == kStatic) {
   1393     next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
   1394     skip_this = false;
   1395   } else if (info->type == kSuper) {
   1396     DCHECK(!fast_path);  // Fast path is a direct call.
   1397     next_call_insn = NextSuperCallInsnSP;
   1398     skip_this = false;
   1399   } else {
   1400     DCHECK_EQ(info->type, kVirtual);
   1401     next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
   1402     skip_this = fast_path;
   1403   }
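           // skip_this is set only on the virtual fast path: NextVCallInsn
           // already loads the receiver into kArg1 while fetching the vtable
           // entry, so the argument loaders skip reloading it.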
   1404   if (!info->is_range) {
   1405     call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
   1406                                       next_call_insn, target_method,
   1407                                       vtable_idx, direct_code, direct_method,
   1408                                       original_type, skip_this);
   1409   } else {
   1410     call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
   1411                                     next_call_insn, target_method, vtable_idx,
   1412                                     direct_code, direct_method, original_type,
   1413                                     skip_this);
   1414   }
    1415   // Finish up any of the call sequence not interleaved with arg loading
   1416   while (call_state >= 0) {
   1417     call_state = next_call_insn(cu_, info, call_state, target_method,
   1418                                 vtable_idx, direct_code, direct_method,
   1419                                 original_type);
   1420   }
   1421   LIR* call_inst;
   1422   if (cu_->instruction_set != kX86) {
   1423     call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   1424   } else {
   1425     if (fast_path && info->type != kInterface) {
   1426       call_inst = OpMem(kOpBlx, TargetReg(kArg0),
   1427                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
   1428     } else {
   1429       ThreadOffset trampoline(-1);
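               // Slow path on x86: call through a per-invoke-type trampoline in
               // the Thread entrypoints.  The -1 above is a poison value that
               // the switch must overwrite.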
   1430       switch (info->type) {
   1431       case kInterface:
   1432         trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
   1433             : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
   1434         break;
   1435       case kDirect:
   1436         trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
   1437         break;
   1438       case kStatic:
   1439         trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
   1440         break;
   1441       case kSuper:
   1442         trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
   1443         break;
   1444       case kVirtual:
   1445         trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
   1446         break;
   1447       default:
   1448         LOG(FATAL) << "Unexpected invoke type";
   1449       }
   1450       call_inst = OpThreadMem(kOpBlx, trampoline);
   1451     }
   1452   }
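           // Every invoke is a safepoint: record the return PC so the runtime
           // can map it back to a dex PC for stack walking, GC root
           // enumeration, and exception delivery.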
   1453   MarkSafepointPC(call_inst);
   1454 
   1455   ClobberCalleeSave();
   1456   if (info->result.location != kLocInvalid) {
   1457     // We have a following MOVE_RESULT - do it now.
   1458     if (info->result.wide) {
   1459       RegLocation ret_loc = GetReturnWide(info->result.fp);
   1460       StoreValueWide(info->result, ret_loc);
   1461     } else {
   1462       RegLocation ret_loc = GetReturn(info->result.fp);
   1463       StoreValue(info->result, ret_loc);
   1464     }
   1465   }
   1466 }
   1467 
   1468 }  // namespace art
   1469