      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "art_method-inl.h"
     18 #include "base/enums.h"
     19 #include "callee_save_frame.h"
     20 #include "common_throws.h"
     21 #include "dex_file-inl.h"
     22 #include "dex_instruction-inl.h"
     23 #include "entrypoints/entrypoint_utils-inl.h"
     24 #include "entrypoints/runtime_asm_entrypoints.h"
     25 #include "gc/accounting/card_table-inl.h"
     26 #include "imt_conflict_table.h"
     27 #include "imtable-inl.h"
     28 #include "interpreter/interpreter.h"
     29 #include "linear_alloc.h"
     30 #include "method_handles.h"
     31 #include "method_reference.h"
     32 #include "mirror/class-inl.h"
     33 #include "mirror/dex_cache-inl.h"
     34 #include "mirror/method.h"
     35 #include "mirror/method_handle_impl.h"
     36 #include "mirror/object-inl.h"
     37 #include "mirror/object_array-inl.h"
     38 #include "oat_quick_method_header.h"
     39 #include "quick_exception_handler.h"
     40 #include "runtime.h"
     41 #include "scoped_thread_state_change-inl.h"
     42 #include "stack.h"
     43 #include "debugger.h"
     44 #include "well_known_classes.h"
     45 
     46 namespace art {
     47 
      48 // Visits the arguments as saved to the stack by a Runtime::kSaveRefsAndArgs callee-save frame.
     49 class QuickArgumentVisitor {
     50   // Number of bytes for each out register in the caller method's frame.
     51   static constexpr size_t kBytesStackArgLocation = 4;
     52   // Frame size in bytes of a callee-save frame for RefsAndArgs.
     53   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
     54       GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
     55 #if defined(__arm__)
     56   // The callee save frame is pointed to by SP.
     57   // | argN       |  |
     58   // | ...        |  |
     59   // | arg4       |  |
     60   // | arg3 spill |  |  Caller's frame
     61   // | arg2 spill |  |
     62   // | arg1 spill |  |
     63   // | Method*    | ---
     64   // | LR         |
     65   // | ...        |    4x6 bytes callee saves
     66   // | R3         |
     67   // | R2         |
     68   // | R1         |
     69   // | S15        |
     70   // | :          |
     71   // | S0         |
     72   // |            |    4x2 bytes padding
     73   // | Method*    |  <- sp
     74   static constexpr bool kSplitPairAcrossRegisterAndStack = kArm32QuickCodeUseSoftFloat;
     75   static constexpr bool kAlignPairRegister = !kArm32QuickCodeUseSoftFloat;
     76   static constexpr bool kQuickSoftFloatAbi = kArm32QuickCodeUseSoftFloat;
     77   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
     78   static constexpr bool kQuickSkipOddFpRegisters = false;
     79   static constexpr size_t kNumQuickGprArgs = 3;
     80   static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
     81   static constexpr bool kGprFprLockstep = false;
     82   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
     83       arm::ArmCalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
     84   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
     85       arm::ArmCalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
     86   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
     87       arm::ArmCalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
     88   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     89     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
     90   }
     91 #elif defined(__aarch64__)
     92   // The callee save frame is pointed to by SP.
     93   // | argN       |  |
     94   // | ...        |  |
     95   // | arg4       |  |
     96   // | arg3 spill |  |  Caller's frame
     97   // | arg2 spill |  |
     98   // | arg1 spill |  |
     99   // | Method*    | ---
    100   // | LR         |
    101   // | X29        |
    102   // |  :         |
    103   // | X20        |
    104   // | X7         |
    105   // | :          |
    106   // | X1         |
    107   // | D7         |
    108   // |  :         |
    109   // | D0         |
    110   // |            |    padding
    111   // | Method*    |  <- sp
    112   static constexpr bool kSplitPairAcrossRegisterAndStack = false;
    113   static constexpr bool kAlignPairRegister = false;
    114   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
    115   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
    116   static constexpr bool kQuickSkipOddFpRegisters = false;
    117   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
    118   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
    119   static constexpr bool kGprFprLockstep = false;
    120   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
    121       arm64::Arm64CalleeSaveFpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first FPR arg.
    122   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
    123       arm64::Arm64CalleeSaveGpr1Offset(Runtime::kSaveRefsAndArgs);  // Offset of first GPR arg.
    124   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
    125       arm64::Arm64CalleeSaveLrOffset(Runtime::kSaveRefsAndArgs);  // Offset of return address.
    126   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    127     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    128   }
    129 #elif defined(__mips__) && !defined(__LP64__)
    130   // The callee save frame is pointed to by SP.
    131   // | argN       |  |
    132   // | ...        |  |
    133   // | arg4       |  |
    134   // | arg3 spill |  |  Caller's frame
    135   // | arg2 spill |  |
    136   // | arg1 spill |  |
    137   // | Method*    | ---
    138   // | RA         |
    139   // | ...        |    callee saves
    140   // | T1         |    arg5
    141   // | T0         |    arg4
    142   // | A3         |    arg3
    143   // | A2         |    arg2
    144   // | A1         |    arg1
    145   // | F19        |
    146   // | F18        |    f_arg5
    147   // | F17        |
    148   // | F16        |    f_arg4
    149   // | F15        |
    150   // | F14        |    f_arg3
    151   // | F13        |
    152   // | F12        |    f_arg2
    153   // | F11        |
    154   // | F10        |    f_arg1
    155   // | F9         |
    156   // | F8         |    f_arg0
    157   // |            |    padding
    158   // | A0/Method* |  <- sp
    159   static constexpr bool kSplitPairAcrossRegisterAndStack = false;
    160   static constexpr bool kAlignPairRegister = true;
    161   static constexpr bool kQuickSoftFloatAbi = false;
    162   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
    163   static constexpr bool kQuickSkipOddFpRegisters = true;
    164   static constexpr size_t kNumQuickGprArgs = 5;   // 5 arguments passed in GPRs.
    165   static constexpr size_t kNumQuickFprArgs = 12;  // 6 arguments passed in FPRs. Floats can be
     166                                                   // passed only in even-numbered registers and each
    167                                                   // double occupies two registers.
    168   static constexpr bool kGprFprLockstep = false;
    169   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8;  // Offset of first FPR arg.
    170   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56;  // Offset of first GPR arg.
    171   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108;  // Offset of return address.
    172   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    173     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    174   }
    175 #elif defined(__mips__) && defined(__LP64__)
    176   // The callee save frame is pointed to by SP.
    177   // | argN       |  |
    178   // | ...        |  |
    179   // | arg4       |  |
    180   // | arg3 spill |  |  Caller's frame
    181   // | arg2 spill |  |
    182   // | arg1 spill |  |
    183   // | Method*    | ---
    184   // | RA         |
    185   // | ...        |    callee saves
    186   // | A7         |    arg7
    187   // | A6         |    arg6
    188   // | A5         |    arg5
    189   // | A4         |    arg4
    190   // | A3         |    arg3
    191   // | A2         |    arg2
    192   // | A1         |    arg1
    193   // | F19        |    f_arg7
    194   // | F18        |    f_arg6
    195   // | F17        |    f_arg5
    196   // | F16        |    f_arg4
    197   // | F15        |    f_arg3
    198   // | F14        |    f_arg2
    199   // | F13        |    f_arg1
    200   // | F12        |    f_arg0
    201   // |            |    padding
    202   // | A0/Method* |  <- sp
     203   // NOTE: for MIPS64, when A0 is skipped, F12 is also skipped.
    204   static constexpr bool kSplitPairAcrossRegisterAndStack = false;
    205   static constexpr bool kAlignPairRegister = false;
    206   static constexpr bool kQuickSoftFloatAbi = false;
    207   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
    208   static constexpr bool kQuickSkipOddFpRegisters = false;
    209   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
    210   static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
    211   static constexpr bool kGprFprLockstep = true;
    212 
    213   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F13).
    214   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
    215   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
    216   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    217     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    218   }
    219 #elif defined(__i386__)
    220   // The callee save frame is pointed to by SP.
    221   // | argN        |  |
    222   // | ...         |  |
    223   // | arg4        |  |
    224   // | arg3 spill  |  |  Caller's frame
    225   // | arg2 spill  |  |
    226   // | arg1 spill  |  |
    227   // | Method*     | ---
    228   // | Return      |
    229   // | EBP,ESI,EDI |    callee saves
    230   // | EBX         |    arg3
    231   // | EDX         |    arg2
    232   // | ECX         |    arg1
    233   // | XMM3        |    float arg 4
    234   // | XMM2        |    float arg 3
    235   // | XMM1        |    float arg 2
    236   // | XMM0        |    float arg 1
    237   // | EAX/Method* |  <- sp
    238   static constexpr bool kSplitPairAcrossRegisterAndStack = false;
    239   static constexpr bool kAlignPairRegister = false;
    240   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
    241   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
    242   static constexpr bool kQuickSkipOddFpRegisters = false;
    243   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
    244   static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
    245   static constexpr bool kGprFprLockstep = false;
    246   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
    247   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
    248   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
    249   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    250     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    251   }
    252 #elif defined(__x86_64__)
    253   // The callee save frame is pointed to by SP.
    254   // | argN            |  |
    255   // | ...             |  |
    256   // | reg. arg spills |  |  Caller's frame
    257   // | Method*         | ---
    258   // | Return          |
    259   // | R15             |    callee save
    260   // | R14             |    callee save
    261   // | R13             |    callee save
    262   // | R12             |    callee save
    263   // | R9              |    arg5
    264   // | R8              |    arg4
    265   // | RSI/R6          |    arg1
    266   // | RBP/R5          |    callee save
    267   // | RBX/R3          |    callee save
    268   // | RDX/R2          |    arg2
    269   // | RCX/R1          |    arg3
    270   // | XMM7            |    float arg 8
    271   // | XMM6            |    float arg 7
    272   // | XMM5            |    float arg 6
    273   // | XMM4            |    float arg 5
    274   // | XMM3            |    float arg 4
    275   // | XMM2            |    float arg 3
    276   // | XMM1            |    float arg 2
    277   // | XMM0            |    float arg 1
    278   // | Padding         |
    279   // | RDI/Method*     |  <- sp
    280   static constexpr bool kSplitPairAcrossRegisterAndStack = false;
    281   static constexpr bool kAlignPairRegister = false;
    282   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
    283   static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
    284   static constexpr bool kQuickSkipOddFpRegisters = false;
    285   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
    286   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
    287   static constexpr bool kGprFprLockstep = false;
    288   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
    289   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
    290   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
    291   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
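             // The GPR spill order in this frame (RCX, RDX, RBX, RBP, RSI, R8, R9, ...; see the diagram
             // above) does not match the managed argument order (RSI, RDX, RCX, R8, R9), so map explicitly.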
    292     switch (gpr_index) {
    293       case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
    294       case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
    295       case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
    296       case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
    297       case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
    298       default:
    299       LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
    300       return 0;
    301     }
    302   }
    303 #else
    304 #error "Unsupported architecture"
    305 #endif
    306 
    307  public:
     308   // Special handling for proxy methods. Proxy methods are instance methods, so the
    309   // 'this' object is the 1st argument. They also have the same frame layout as the
    310   // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
    311   // 1st GPR.
    312   static mirror::Object* GetProxyThisObject(ArtMethod** sp)
    313       REQUIRES_SHARED(Locks::mutator_lock_) {
    314     CHECK((*sp)->IsProxyMethod());
    315     CHECK_GT(kNumQuickGprArgs, 0u);
    316     constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    317     size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
    318         GprIndexToGprOffset(kThisGprIndex);
    319     uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    320     return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
    321   }
    322 
    323   static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    324     DCHECK((*sp)->IsCalleeSaveMethod());
    325     return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
    326   }
    327 
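           // Returns the ArtMethod* stored at the caller's frame (sp + frame size), i.e. the outermost
           // compiled method at the call site; GetCallingMethod() may instead resolve to an inlined caller.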
    328   static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    329     DCHECK((*sp)->IsCalleeSaveMethod());
    330     uint8_t* previous_sp =
    331         reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    332     return *reinterpret_cast<ArtMethod**>(previous_sp);
    333   }
    334 
    335   static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    336     DCHECK((*sp)->IsCalleeSaveMethod());
    337     const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
    338     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
    339         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    340     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    341     const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    342     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
    343 
    344     if (current_code->IsOptimized()) {
    345       CodeInfo code_info = current_code->GetOptimizedCodeInfo();
    346       CodeInfoEncoding encoding = code_info.ExtractEncoding();
    347       StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
    348       DCHECK(stack_map.IsValid());
    349       if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
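                 // The call site was inlined: the caller's dex pc is the one recorded for the deepest
                 // inline frame (depth - 1).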
    350         InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
    351         return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
    352                                            inline_info.GetDepth(encoding.inline_info.encoding)-1);
    353       } else {
    354         return stack_map.GetDexPc(encoding.stack_map.encoding);
    355       }
    356     } else {
    357       return current_code->ToDexPc(*caller_sp, outer_pc);
    358     }
    359   }
    360 
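           // Retrieves the invoke type and target method index that the optimizing compiler recorded for
           // the call site (InvokeInfo). Returns false if the caller's code is not optimized or no
           // InvokeInfo was emitted for this return pc.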
    361   static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
    362       REQUIRES_SHARED(Locks::mutator_lock_) {
    363     DCHECK((*sp)->IsCalleeSaveMethod());
    364     const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
    365     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
    366         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
    367     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
    368     const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
    369     if (!current_code->IsOptimized()) {
    370       return false;
    371     }
    372     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
    373     CodeInfo code_info = current_code->GetOptimizedCodeInfo();
    374     CodeInfoEncoding encoding = code_info.ExtractEncoding();
    375     MethodInfo method_info = current_code->GetOptimizedMethodInfo();
    376     InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
    377     if (invoke.IsValid()) {
    378       *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
    379       *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
    380       return true;
    381     }
    382     return false;
    383   }
    384 
     385   // For the given quick RefsAndArgs callee-save frame, return the caller's PC.
    386   static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
    387     DCHECK((*sp)->IsCalleeSaveMethod());
    388     uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    389     return *reinterpret_cast<uintptr_t*>(lr);
    390   }
    391 
    392   QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
    393                        uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
    394           is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
    395           gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
    396           fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
    397           stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
    398               + sizeof(ArtMethod*)),  // Skip ArtMethod*.
    399           gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
    400           cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
    401     static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
    402                   "Number of Quick FPR arguments unexpected");
    403     static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
    404                   "Double alignment unexpected");
     405     // For register alignment, we want to assume that the counter (fpr_double_index_) is even if the
     406     // next register is even.
    407     static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
    408                   "Number of Quick FPR arguments not even");
    409     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
    410   }
    411 
    412   virtual ~QuickArgumentVisitor() {}
    413 
    414   virtual void Visit() = 0;
    415 
    416   Primitive::Type GetParamPrimitiveType() const {
    417     return cur_type_;
    418   }
    419 
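           // Returns the address of the current argument: an FPR spill slot for FP args (on hard-float
           // ABIs), a GPR spill slot for other args, falling back to the caller's out area on the stack
           // when the argument registers run out.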
    420   uint8_t* GetParamAddress() const {
    421     if (!kQuickSoftFloatAbi) {
    422       Primitive::Type type = GetParamPrimitiveType();
    423       if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
    424         if (type == Primitive::kPrimDouble && kQuickDoubleRegAlignedFloatBackFilled) {
    425           if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
    426             return fpr_args_ + (fpr_double_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
    427           }
    428         } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
    429           return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
    430         }
    431         return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    432       }
    433     }
    434     if (gpr_index_ < kNumQuickGprArgs) {
    435       return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    436     }
    437     return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    438   }
    439 
    440   bool IsSplitLongOrDouble() const {
    441     if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) ||
    442         (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
    443       return is_split_long_or_double_;
    444     } else {
     445       return false;  // An optimization for when GPRs and FPRs are 64-bit.
    446     }
    447   }
    448 
    449   bool IsParamAReference() const {
    450     return GetParamPrimitiveType() == Primitive::kPrimNot;
    451   }
    452 
    453   bool IsParamALongOrDouble() const {
    454     Primitive::Type type = GetParamPrimitiveType();
    455     return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
    456   }
    457 
    458   uint64_t ReadSplitLongParam() const {
     459     // The split long is always available through the stack.
    460     return *reinterpret_cast<uint64_t*>(stack_args_
    461         + stack_index_ * kBytesStackArgLocation);
    462   }
    463 
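           // On lockstep ABIs (kGprFprLockstep, i.e. MIPS64 here), GPR and FPR argument slots are
           // allocated in parallel, so advancing one index also advances the other.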
    464   void IncGprIndex() {
    465     gpr_index_++;
    466     if (kGprFprLockstep) {
    467       fpr_index_++;
    468     }
    469   }
    470 
    471   void IncFprIndex() {
    472     fpr_index_++;
    473     if (kGprFprLockstep) {
    474       gpr_index_++;
    475     }
    476   }
    477 
    478   void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
     479     // (a) 'stack_args_' should point to the method's first argument
     480     // (b) whatever the argument type, 'stack_index_' should be moved
     481     //     forward with every visit.
    482     gpr_index_ = 0;
    483     fpr_index_ = 0;
    484     if (kQuickDoubleRegAlignedFloatBackFilled) {
    485       fpr_double_index_ = 0;
    486     }
    487     stack_index_ = 0;
     488     if (!is_static_) {  // Handle the 'this' argument.
    489       cur_type_ = Primitive::kPrimNot;
    490       is_split_long_or_double_ = false;
    491       Visit();
    492       stack_index_++;
    493       if (kNumQuickGprArgs > 0) {
    494         IncGprIndex();
    495       }
    496     }
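             // shorty_[0] is the return type, so parameters start at index 1.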
    497     for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
    498       cur_type_ = Primitive::GetType(shorty_[shorty_index]);
    499       switch (cur_type_) {
    500         case Primitive::kPrimNot:
    501         case Primitive::kPrimBoolean:
    502         case Primitive::kPrimByte:
    503         case Primitive::kPrimChar:
    504         case Primitive::kPrimShort:
    505         case Primitive::kPrimInt:
    506           is_split_long_or_double_ = false;
    507           Visit();
    508           stack_index_++;
    509           if (gpr_index_ < kNumQuickGprArgs) {
    510             IncGprIndex();
    511           }
    512           break;
    513         case Primitive::kPrimFloat:
    514           is_split_long_or_double_ = false;
    515           Visit();
    516           stack_index_++;
    517           if (kQuickSoftFloatAbi) {
    518             if (gpr_index_ < kNumQuickGprArgs) {
    519               IncGprIndex();
    520             }
    521           } else {
    522             if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
    523               IncFprIndex();
    524               if (kQuickDoubleRegAlignedFloatBackFilled) {
    525                 // Double should not overlap with float.
    526                 // For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
    527                 fpr_double_index_ = std::max(fpr_double_index_, RoundUp(fpr_index_, 2));
    528                 // Float should not overlap with double.
    529                 if (fpr_index_ % 2 == 0) {
    530                   fpr_index_ = std::max(fpr_double_index_, fpr_index_);
    531                 }
    532               } else if (kQuickSkipOddFpRegisters) {
    533                 IncFprIndex();
    534               }
    535             }
    536           }
    537           break;
    538         case Primitive::kPrimDouble:
    539         case Primitive::kPrimLong:
    540           if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
    541             if (cur_type_ == Primitive::kPrimLong &&
    542 #if defined(__mips__) && !defined(__LP64__)
    543                 (gpr_index_ == 0 || gpr_index_ == 2) &&
    544 #else
    545                 gpr_index_ == 0 &&
    546 #endif
    547                 kAlignPairRegister) {
    548               // Currently, this is only for ARM and MIPS, where we align long parameters with
    549               // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using
    550               // R2 (on ARM) or A2(T0) (on MIPS) instead.
    551               IncGprIndex();
    552             }
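                         // The pair is split across registers and stack when only one 32-bit GPR slot remains.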
    553             is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
    554                 ((gpr_index_ + 1) == kNumQuickGprArgs);
    555             if (!kSplitPairAcrossRegisterAndStack && is_split_long_or_double_) {
    556               // We don't want to split this. Pass over this register.
    557               gpr_index_++;
    558               is_split_long_or_double_ = false;
    559             }
    560             Visit();
    561             if (kBytesStackArgLocation == 4) {
    562               stack_index_+= 2;
    563             } else {
    564               CHECK_EQ(kBytesStackArgLocation, 8U);
    565               stack_index_++;
    566             }
    567             if (gpr_index_ < kNumQuickGprArgs) {
    568               IncGprIndex();
    569               if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
    570                 if (gpr_index_ < kNumQuickGprArgs) {
    571                   IncGprIndex();
    572                 }
    573               }
    574             }
    575           } else {
    576             is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
    577                 ((fpr_index_ + 1) == kNumQuickFprArgs) && !kQuickDoubleRegAlignedFloatBackFilled;
    578             Visit();
    579             if (kBytesStackArgLocation == 4) {
    580               stack_index_+= 2;
    581             } else {
    582               CHECK_EQ(kBytesStackArgLocation, 8U);
    583               stack_index_++;
    584             }
    585             if (kQuickDoubleRegAlignedFloatBackFilled) {
    586               if (fpr_double_index_ + 2 < kNumQuickFprArgs + 1) {
    587                 fpr_double_index_ += 2;
    588                 // Float should not overlap with double.
    589                 if (fpr_index_ % 2 == 0) {
    590                   fpr_index_ = std::max(fpr_double_index_, fpr_index_);
    591                 }
    592               }
    593             } else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
    594               IncFprIndex();
    595               if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
    596                 if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
    597                   IncFprIndex();
    598                 }
    599               }
    600             }
    601           }
    602           break;
    603         default:
    604           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
    605       }
    606     }
    607   }
    608 
    609  protected:
    610   const bool is_static_;
    611   const char* const shorty_;
    612   const uint32_t shorty_len_;
    613 
    614  private:
    615   uint8_t* const gpr_args_;  // Address of GPR arguments in callee save frame.
    616   uint8_t* const fpr_args_;  // Address of FPR arguments in callee save frame.
    617   uint8_t* const stack_args_;  // Address of stack arguments in caller's frame.
    618   uint32_t gpr_index_;  // Index into spilled GPRs.
    619   // Index into spilled FPRs.
    620   // In case kQuickDoubleRegAlignedFloatBackFilled, it may index a hole while fpr_double_index_
    621   // holds a higher register number.
    622   uint32_t fpr_index_;
    623   // Index into spilled FPRs for aligned double.
    624   // Only used when kQuickDoubleRegAlignedFloatBackFilled. Next available double register indexed in
     625   // terms of singles; it may be behind fpr_index_.
    626   uint32_t fpr_double_index_;
    627   uint32_t stack_index_;  // Index into arguments on the stack.
    628   // The current type of argument during VisitArguments.
    629   Primitive::Type cur_type_;
    630   // Does a 64bit parameter straddle the register and stack arguments?
    631   bool is_split_long_or_double_;
    632 };
    633 
    634 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
     635 // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
    636 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    637     REQUIRES_SHARED(Locks::mutator_lock_) {
    638   return QuickArgumentVisitor::GetProxyThisObject(sp);
    639 }
    640 
     641 // Visits arguments on the stack, placing them into the shadow frame.
    642 class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
    643  public:
    644   BuildQuickShadowFrameVisitor(ArtMethod** sp, bool is_static, const char* shorty,
    645                                uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
    646       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
    647 
    648   void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
    649 
    650  private:
    651   ShadowFrame* const sf_;
    652   uint32_t cur_reg_;
    653 
    654   DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
    655 };
    656 
    657 void BuildQuickShadowFrameVisitor::Visit() {
    658   Primitive::Type type = GetParamPrimitiveType();
    659   switch (type) {
    660     case Primitive::kPrimLong:  // Fall-through.
    661     case Primitive::kPrimDouble:
    662       if (IsSplitLongOrDouble()) {
    663         sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
    664       } else {
    665         sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
    666       }
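             // Wide values occupy two vregs: increment once here, the shared increment at the end of
             // Visit() advances past the second slot.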
    667       ++cur_reg_;
    668       break;
    669     case Primitive::kPrimNot: {
    670         StackReference<mirror::Object>* stack_ref =
    671             reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    672         sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
    673       }
    674       break;
    675     case Primitive::kPrimBoolean:  // Fall-through.
    676     case Primitive::kPrimByte:     // Fall-through.
    677     case Primitive::kPrimChar:     // Fall-through.
    678     case Primitive::kPrimShort:    // Fall-through.
    679     case Primitive::kPrimInt:      // Fall-through.
    680     case Primitive::kPrimFloat:
    681       sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
    682       break;
    683     case Primitive::kPrimVoid:
    684       LOG(FATAL) << "UNREACHABLE";
    685       UNREACHABLE();
    686   }
    687   ++cur_reg_;
    688 }
    689 
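         // Bridge from compiled (quick) code into the interpreter: builds a shadow frame from the quick
         // arguments (or resumes a pending deoptimization frame) and runs the method in the interpreter.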
    690 extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
    691     REQUIRES_SHARED(Locks::mutator_lock_) {
    692   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
    693   // frame.
    694   ScopedQuickEntrypointChecks sqec(self);
    695 
    696   if (UNLIKELY(!method->IsInvokable())) {
    697     method->ThrowInvocationTimeError();
    698     return 0;
    699   }
    700 
    701   JValue tmp_value;
    702   ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
    703       StackedShadowFrameType::kDeoptimizationShadowFrame, false);
    704   ManagedStack fragment;
    705 
    706   DCHECK(!method->IsNative()) << method->PrettyMethod();
    707   uint32_t shorty_len = 0;
    708   ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    709   const DexFile::CodeItem* code_item = non_proxy_method->GetCodeItem();
    710   DCHECK(code_item != nullptr) << method->PrettyMethod();
    711   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
    712 
    713   JValue result;
    714 
    715   if (deopt_frame != nullptr) {
    716     // Coming from partial-fragment deopt.
    717 
    718     if (kIsDebugBuild) {
    719       // Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom
    720       // of the call-stack) corresponds to the called method.
    721       ShadowFrame* linked = deopt_frame;
    722       while (linked->GetLink() != nullptr) {
    723         linked = linked->GetLink();
    724       }
    725       CHECK_EQ(method, linked->GetMethod()) << method->PrettyMethod() << " "
    726           << ArtMethod::PrettyMethod(linked->GetMethod());
    727     }
    728 
    729     if (VLOG_IS_ON(deopt)) {
    730       // Print out the stack to verify that it was a partial-fragment deopt.
     731       LOG(INFO) << "Continuing from deopt. Stack is:";
    732       QuickExceptionHandler::DumpFramesWithType(self, true);
    733     }
    734 
    735     ObjPtr<mirror::Throwable> pending_exception;
    736     bool from_code = false;
    737     self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
    738 
    739     // Push a transition back into managed code onto the linked list in thread.
    740     self->PushManagedStackFragment(&fragment);
    741 
    742     // Ensure that the stack is still in order.
    743     if (kIsDebugBuild) {
    744       class DummyStackVisitor : public StackVisitor {
    745        public:
    746         explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
    747             : StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
    748 
    749         bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    750           // Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
    751           // logic. Just always say we want to continue.
    752           return true;
    753         }
    754       };
    755       DummyStackVisitor dsv(self);
    756       dsv.WalkStack();
    757     }
    758 
    759     // Restore the exception that was pending before deoptimization then interpret the
    760     // deoptimized frames.
    761     if (pending_exception != nullptr) {
    762       self->SetException(pending_exception);
    763     }
    764     interpreter::EnterInterpreterFromDeoptimize(self, deopt_frame, from_code, &result);
    765   } else {
    766     const char* old_cause = self->StartAssertNoThreadSuspension(
    767         "Building interpreter shadow frame");
    768     uint16_t num_regs = code_item->registers_size_;
    769     // No last shadow coming from quick.
    770     ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
    771         CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
    772     ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
    773     size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    774     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
    775                                                       shadow_frame, first_arg_reg);
    776     shadow_frame_builder.VisitArguments();
    777     const bool needs_initialization =
    778         method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
    779     // Push a transition back into managed code onto the linked list in thread.
    780     self->PushManagedStackFragment(&fragment);
    781     self->PushShadowFrame(shadow_frame);
    782     self->EndAssertNoThreadSuspension(old_cause);
    783 
    784     if (needs_initialization) {
    785       // Ensure static method's class is initialized.
    786       StackHandleScope<1> hs(self);
    787       Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
    788       if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
    789         DCHECK(Thread::Current()->IsExceptionPending())
    790             << shadow_frame->GetMethod()->PrettyMethod();
    791         self->PopManagedStackFragment(fragment);
    792         return 0;
    793       }
    794     }
    795 
    796     result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
    797   }
    798 
    799   // Pop transition.
    800   self->PopManagedStackFragment(fragment);
    801 
     802   // Request a stack deoptimization if needed.
    803   ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
    804   uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
     805   // If caller_pc is the instrumentation exit stub, the stub will check whether deoptimization
     806   // should be done, and it knows the real return pc.
    807   if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
    808                Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
    809     if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
    810       LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
    811                    << caller->PrettyMethod();
    812     } else {
    813       // Push the context of the deoptimization stack so we can restore the return value and the
    814       // exception before executing the deoptimized frames.
    815       self->PushDeoptimizationContext(
    816           result, shorty[0] == 'L', /* from_code */ false, self->GetException());
    817 
    818       // Set special exception to cause deoptimization.
    819       self->SetException(Thread::GetDeoptimizationException());
    820     }
    821   }
    822 
    823   // No need to restore the args since the method has already been run by the interpreter.
    824   return result.GetJ();
    825 }
    826 
     827 // Visits arguments on the stack, placing them into the args vector; Object* arguments are converted
     828 // to jobjects.
    829 class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
    830  public:
    831   BuildQuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, uint32_t shorty_len,
    832                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
    833       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
    834 
    835   void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
    836 
    837   void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
    838 
    839  private:
    840   ScopedObjectAccessUnchecked* const soa_;
    841   std::vector<jvalue>* const args_;
    842   // References which we must update when exiting in case the GC moved the objects.
    843   std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
    844 
    845   DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
    846 };
    847 
    848 void BuildQuickArgumentVisitor::Visit() {
    849   jvalue val;
    850   Primitive::Type type = GetParamPrimitiveType();
    851   switch (type) {
    852     case Primitive::kPrimNot: {
    853       StackReference<mirror::Object>* stack_ref =
    854           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    855       val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    856       references_.push_back(std::make_pair(val.l, stack_ref));
    857       break;
    858     }
    859     case Primitive::kPrimLong:  // Fall-through.
    860     case Primitive::kPrimDouble:
    861       if (IsSplitLongOrDouble()) {
    862         val.j = ReadSplitLongParam();
    863       } else {
    864         val.j = *reinterpret_cast<jlong*>(GetParamAddress());
    865       }
    866       break;
    867     case Primitive::kPrimBoolean:  // Fall-through.
    868     case Primitive::kPrimByte:     // Fall-through.
    869     case Primitive::kPrimChar:     // Fall-through.
    870     case Primitive::kPrimShort:    // Fall-through.
    871     case Primitive::kPrimInt:      // Fall-through.
    872     case Primitive::kPrimFloat:
    873       val.i = *reinterpret_cast<jint*>(GetParamAddress());
    874       break;
    875     case Primitive::kPrimVoid:
    876       LOG(FATAL) << "UNREACHABLE";
    877       UNREACHABLE();
    878   }
    879   args_->push_back(val);
    880 }
    881 
    882 void BuildQuickArgumentVisitor::FixupReferences() {
    883   // Fixup any references which may have changed.
    884   for (const auto& pair : references_) {
    885     pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
    886     soa_->Env()->DeleteLocalRef(pair.first);
    887   }
    888 }
    889 
     890 // Handler for invocation on proxy methods. On entry, a frame will exist for the proxy object method,
     891 // which is responsible for recording callee-save registers. We explicitly place the incoming
     892 // reference arguments into jobjects (so they survive GC). We then invoke the invocation handler, a
     893 // field within the proxy object, which boxes the primitive arguments and deals with error cases.
    894 extern "C" uint64_t artQuickProxyInvokeHandler(
    895     ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    896     REQUIRES_SHARED(Locks::mutator_lock_) {
    897   DCHECK(proxy_method->IsProxyMethod()) << proxy_method->PrettyMethod();
    898   DCHECK(receiver->GetClass()->IsProxyClass()) << proxy_method->PrettyMethod();
    899   // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
    900   const char* old_cause =
    901       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
     902   // Register the top of the managed stack, making the stack crawlable.
    903   DCHECK_EQ((*sp), proxy_method) << proxy_method->PrettyMethod();
    904   self->VerifyStack();
    905   // Start new JNI local reference state.
    906   JNIEnvExt* env = self->GetJniEnv();
    907   ScopedObjectAccessUnchecked soa(env);
    908   ScopedJniEnvLocalRefState env_state(env);
     909   // Create a local reference copy of the receiver; the interface method jobject is created below.
    910   jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
    911 
     912   // Place the arguments into the args vector and remove the receiver.
    913   ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    914   CHECK(!non_proxy_method->IsStatic()) << proxy_method->PrettyMethod() << " "
    915                                        << non_proxy_method->PrettyMethod();
    916   std::vector<jvalue> args;
    917   uint32_t shorty_len = 0;
    918   const char* shorty = non_proxy_method->GetShorty(&shorty_len);
    919   BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
    920 
    921   local_ref_visitor.VisitArguments();
    922   DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod();
    923   args.erase(args.begin());
    924 
    925   // Convert proxy method into expected interface method.
    926   ArtMethod* interface_method = proxy_method->FindOverriddenMethod(kRuntimePointerSize);
    927   DCHECK(interface_method != nullptr) << proxy_method->PrettyMethod();
    928   DCHECK(!interface_method->IsProxyMethod()) << interface_method->PrettyMethod();
    929   self->EndAssertNoThreadSuspension(old_cause);
    930   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
    931   DCHECK(!Runtime::Current()->IsActiveTransaction());
    932   jobject interface_method_jobj = soa.AddLocalReference<jobject>(
    933       mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(),
    934                                                                       interface_method));
    935 
     936   // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
    937   // that performs allocations.
    938   JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
    939   // Restore references which might have moved.
    940   local_ref_visitor.FixupReferences();
    941   return result.GetJ();
    942 }
    943 
     944 // Read object references held in arguments from quick frames and place them in JNI local references
     945 // so they don't get garbage collected.
    946 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
    947  public:
    948   RememberForGcArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
    949                                uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
    950       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
    951 
    952   void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
    953 
    954   void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
    955 
    956  private:
    957   ScopedObjectAccessUnchecked* const soa_;
    958   // References which we must update when exiting in case the GC moved the objects.
     959   std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
    960 
    961   DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
    962 };
    963 
    964 void RememberForGcArgumentVisitor::Visit() {
    965   if (IsParamAReference()) {
    966     StackReference<mirror::Object>* stack_ref =
    967         reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    968     jobject reference =
    969         soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    970     references_.push_back(std::make_pair(reference, stack_ref));
    971   }
    972 }
    973 
    974 void RememberForGcArgumentVisitor::FixupReferences() {
    975   // Fixup any references which may have changed.
    976   for (const auto& pair : references_) {
    977     pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
    978     soa_->Env()->DeleteLocalRef(pair.first);
    979   }
    980 }
    981 
    982 // Lazily resolve a method for quick. Called by stub code.
    983 extern "C" const void* artQuickResolutionTrampoline(
    984     ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
    985     REQUIRES_SHARED(Locks::mutator_lock_) {
    986   // The resolution trampoline stashes the resolved method into the callee-save frame to transport
    987   // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
    988   // does not have the same stack layout as the callee-save method).
    989   ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
    990   // Start new JNI local reference state
    991   JNIEnvExt* env = self->GetJniEnv();
    992   ScopedObjectAccessUnchecked soa(env);
    993   ScopedJniEnvLocalRefState env_state(env);
    994   const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
    995 
    996   // Compute details about the called method (avoid GCs)
    997   ClassLinker* linker = Runtime::Current()->GetClassLinker();
    998   InvokeType invoke_type;
    999   MethodReference called_method(nullptr, 0);
   1000   const bool called_method_known_on_entry = !called->IsRuntimeMethod();
   1001   ArtMethod* caller = nullptr;
   1002   if (!called_method_known_on_entry) {
   1003     caller = QuickArgumentVisitor::GetCallingMethod(sp);
   1004     called_method.dex_file = caller->GetDexFile();
   1005 
   1006     InvokeType stack_map_invoke_type;
   1007     uint32_t stack_map_dex_method_idx;
   1008     const bool found_stack_map = QuickArgumentVisitor::GetInvokeType(sp,
   1009                                                                      &stack_map_invoke_type,
   1010                                                                      &stack_map_dex_method_idx);
   1011     // For debug builds, we make sure both of the paths are consistent by also looking at the dex
   1012     // code.
   1013     if (!found_stack_map || kIsDebugBuild) {
   1014       uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
   1015       const DexFile::CodeItem* code;
   1016       code = caller->GetCodeItem();
   1017       CHECK_LT(dex_pc, code->insns_size_in_code_units_);
   1018       const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
   1019       Instruction::Code instr_code = instr->Opcode();
   1020       bool is_range;
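               // Recover the invoke type by decoding the invoke instruction at the caller's dex pc.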
   1021       switch (instr_code) {
   1022         case Instruction::INVOKE_DIRECT:
   1023           invoke_type = kDirect;
   1024           is_range = false;
   1025           break;
   1026         case Instruction::INVOKE_DIRECT_RANGE:
   1027           invoke_type = kDirect;
   1028           is_range = true;
   1029           break;
   1030         case Instruction::INVOKE_STATIC:
   1031           invoke_type = kStatic;
   1032           is_range = false;
   1033           break;
   1034         case Instruction::INVOKE_STATIC_RANGE:
   1035           invoke_type = kStatic;
   1036           is_range = true;
   1037           break;
   1038         case Instruction::INVOKE_SUPER:
   1039           invoke_type = kSuper;
   1040           is_range = false;
   1041           break;
   1042         case Instruction::INVOKE_SUPER_RANGE:
   1043           invoke_type = kSuper;
   1044           is_range = true;
   1045           break;
   1046         case Instruction::INVOKE_VIRTUAL:
   1047           invoke_type = kVirtual;
   1048           is_range = false;
   1049           break;
   1050         case Instruction::INVOKE_VIRTUAL_RANGE:
   1051           invoke_type = kVirtual;
   1052           is_range = true;
   1053           break;
   1054         case Instruction::INVOKE_INTERFACE:
   1055           invoke_type = kInterface;
   1056           is_range = false;
   1057           break;
   1058         case Instruction::INVOKE_INTERFACE_RANGE:
   1059           invoke_type = kInterface;
   1060           is_range = true;
   1061           break;
   1062         default:
   1063           LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(nullptr);
   1064           UNREACHABLE();
   1065       }
   1066       called_method.dex_method_index = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
    1067       // Check that the invoke matches what we expected; note that this path only happens for debug
   1068       // builds.
   1069       if (found_stack_map) {
   1070         DCHECK_EQ(stack_map_invoke_type, invoke_type);
   1071         if (invoke_type != kSuper) {
   1072           // Super may be sharpened.
   1073           DCHECK_EQ(stack_map_dex_method_idx, called_method.dex_method_index)
   1074               << called_method.dex_file->PrettyMethod(stack_map_dex_method_idx) << " "
   1075               << called_method.dex_file->PrettyMethod(called_method.dex_method_index);
   1076         }
   1077       } else {
   1078         VLOG(dex) << "Accessed dex file for invoke " << invoke_type << " "
   1079                   << called_method.dex_method_index;
   1080       }
   1081     } else {
   1082       invoke_type = stack_map_invoke_type;
   1083       called_method.dex_method_index = stack_map_dex_method_idx;
   1084     }
   1085   } else {
   1086     invoke_type = kStatic;
   1087     called_method.dex_file = called->GetDexFile();
   1088     called_method.dex_method_index = called->GetDexMethodIndex();
   1089   }
   1090   uint32_t shorty_len;
   1091   const char* shorty =
   1092       called_method.dex_file->GetMethodShorty(
   1093           called_method.dex_file->GetMethodId(called_method.dex_method_index), &shorty_len);
   1094   RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
   1095   visitor.VisitArguments();
   1096   self->EndAssertNoThreadSuspension(old_cause);
   1097   const bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
   1098   // Resolve method filling in dex cache.
   1099   if (!called_method_known_on_entry) {
   1100     StackHandleScope<1> hs(self);
   1101     mirror::Object* dummy = nullptr;
   1102     HandleWrapper<mirror::Object> h_receiver(
   1103         hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
   1104     DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
   1105     called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
   1106         self, called_method.dex_method_index, caller, invoke_type);
   1107   }
   1108   const void* code = nullptr;
   1109   if (LIKELY(!self->IsExceptionPending())) {
   1110     // Incompatible class change should have been handled in resolve method.
   1111     CHECK(!called->CheckIncompatibleClassChange(invoke_type))
   1112         << called->PrettyMethod() << " " << invoke_type;
   1113     if (virtual_or_interface || invoke_type == kSuper) {
   1114       // Refine called method based on receiver for kVirtual/kInterface, and
   1115       // caller for kSuper.
   1116       ArtMethod* orig_called = called;
   1117       if (invoke_type == kVirtual) {
   1118         CHECK(receiver != nullptr) << invoke_type;
   1119         called = receiver->GetClass()->FindVirtualMethodForVirtual(called, kRuntimePointerSize);
   1120       } else if (invoke_type == kInterface) {
   1121         CHECK(receiver != nullptr) << invoke_type;
   1122         called = receiver->GetClass()->FindVirtualMethodForInterface(called, kRuntimePointerSize);
   1123       } else {
   1124         DCHECK_EQ(invoke_type, kSuper);
   1125         CHECK(caller != nullptr) << invoke_type;
   1126         StackHandleScope<2> hs(self);
   1127         Handle<mirror::DexCache> dex_cache(
   1128             hs.NewHandle(caller->GetDeclaringClass()->GetDexCache()));
   1129         Handle<mirror::ClassLoader> class_loader(
   1130             hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader()));
   1131         // TODO Maybe put this into a mirror::Class function.
   1132         mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
   1133             called_method.dex_method_index, dex_cache, class_loader);
   1134         if (ref_class->IsInterface()) {
   1135           called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
   1136         } else {
   1137           called = caller->GetDeclaringClass()->GetSuperClass()->GetVTableEntry(
   1138               called->GetMethodIndex(), kRuntimePointerSize);
   1139         }
   1140       }
   1141 
   1142       CHECK(called != nullptr) << orig_called->PrettyMethod() << " "
   1143                                << mirror::Object::PrettyTypeOf(receiver) << " "
   1144                                << invoke_type << " " << orig_called->GetVtableIndex();
   1145 
    1146       // We came here because of sharpening. Ensure the dex cache is up-to-date on the method
    1147       // index of the sharpened method, avoiding dirtying the dex cache if possible.
    1148       // Note that called_method.dex_method_index references the dex method before the
    1149       // FindVirtualMethodFor... call above. This is OK for FindDexMethodIndexInOtherDexFile,
    1150       // which only cares about the name and signature.
   1151       uint32_t update_dex_cache_method_index = called->GetDexMethodIndex();
   1152       if (!called->HasSameDexCacheResolvedMethods(caller, kRuntimePointerSize)) {
   1153         // Calling from one dex file to another, need to compute the method index appropriate to
   1154         // the caller's dex file. Since we get here only if the original called was a runtime
   1155         // method, we've got the correct dex_file and a dex_method_idx from above.
   1156         DCHECK(!called_method_known_on_entry);
   1157         DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
   1158         const DexFile* caller_dex_file = called_method.dex_file;
   1159         uint32_t caller_method_name_and_sig_index = called_method.dex_method_index;
   1160         update_dex_cache_method_index =
   1161             called->FindDexMethodIndexInOtherDexFile(*caller_dex_file,
   1162                                                      caller_method_name_and_sig_index);
   1163       }
   1164       if ((update_dex_cache_method_index != DexFile::kDexNoIndex) &&
   1165           (caller->GetDexCacheResolvedMethod(
   1166               update_dex_cache_method_index, kRuntimePointerSize) != called)) {
   1167         caller->SetDexCacheResolvedMethod(update_dex_cache_method_index,
   1168                                           called,
   1169                                           kRuntimePointerSize);
   1170       }
   1171     } else if (invoke_type == kStatic) {
   1172       const auto called_dex_method_idx = called->GetDexMethodIndex();
   1173       // For static invokes, we may dispatch to the static method in the superclass but resolve
   1174       // using the subclass. To prevent getting slow paths on each invoke, we force set the
   1175       // resolved method for the super class dex method index if we are in the same dex file.
   1176       // b/19175856
   1177       if (called->GetDexFile() == called_method.dex_file &&
   1178           called_method.dex_method_index != called_dex_method_idx) {
   1179         called->GetDexCache()->SetResolvedMethod(called_dex_method_idx,
   1180                                                  called,
   1181                                                  kRuntimePointerSize);
   1182       }
   1183     }
   1184 
   1185     // Ensure that the called method's class is initialized.
   1186     StackHandleScope<1> hs(soa.Self());
   1187     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
   1188     linker->EnsureInitialized(soa.Self(), called_class, true, true);
   1189     if (LIKELY(called_class->IsInitialized())) {
   1190       if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
   1191         // If we are single-stepping or the called method is deoptimized (by a
   1192         // breakpoint, for example), then we have to execute the called method
   1193         // with the interpreter.
   1194         code = GetQuickToInterpreterBridge();
   1195       } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
   1196         // If the caller is deoptimized (by a breakpoint, for example), we have to
    1197         // continue its execution with the interpreter when returning from the called
    1198         // method. Because we do not want to execute the called method with the
    1199         // interpreter, we wrap its execution into the instrumentation stubs.
    1200         // When the called method returns, it will execute the instrumentation
    1201         // exit hook, which determines whether the interpreter is needed by calling
    1202         // Dbg::IsForcedInterpreterNeededForUpcall, and deoptimizes the stack if
    1203         // it is needed.
   1204         code = GetQuickInstrumentationEntryPoint();
   1205       } else {
   1206         code = called->GetEntryPointFromQuickCompiledCode();
   1207       }
   1208     } else if (called_class->IsInitializing()) {
   1209       if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
   1210         // If we are single-stepping or the called method is deoptimized (by a
   1211         // breakpoint, for example), then we have to execute the called method
   1212         // with the interpreter.
   1213         code = GetQuickToInterpreterBridge();
   1214       } else if (invoke_type == kStatic) {
   1215         // Class is still initializing, go to oat and grab code (trampoline must be left in place
   1216         // until class is initialized to stop races between threads).
   1217         code = linker->GetQuickOatCodeFor(called);
   1218       } else {
   1219         // No trampoline for non-static methods.
   1220         code = called->GetEntryPointFromQuickCompiledCode();
   1221       }
   1222     } else {
   1223       DCHECK(called_class->IsErroneous());
   1224     }
   1225   }
   1226   CHECK_EQ(code == nullptr, self->IsExceptionPending());
    1227   // Fix up any locally saved objects that may have moved during a GC.
   1228   visitor.FixupReferences();
   1229   // Place called method in callee-save frame to be placed as first argument to quick method.
   1230   *sp = called;
   1231 
   1232   return code;
   1233 }
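         // Conceptually (the real logic lives in the assembly resolution stub that called this
         // function), the stub then reloads the resolved ArtMethod* that was stored at *sp above and,
         // if the returned code is non-null, tail-calls it with that method as the expected method
         // argument; a null return means an exception is pending and is delivered instead.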
   1234 
   1235 /*
   1236  * This class uses a couple of observations to unite the different calling conventions through
   1237  * a few constants.
   1238  *
    1239  * 1) The number of registers used for passing is normally even, so counting down has no penalty
    1240  *    for possible alignment.
    1241  * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
    1242  *    types, so using uintptr_t is OK. This also means that we can use kRegistersNeededX to
    1243  *    denote when we have to split things.
    1244  * 3) The only soft-float architecture, ARM, is 32b, so no widening needs to be taken into
    1245  *    account for floats and we can use the Int handling directly.
    1246  * 4) Only 64b architectures widen, and their stack is aligned 8B anyway, so no padding code is
    1247  *    necessary when widening. Also, widening of Ints takes place implicitly, and the extension
    1248  *    should be compatible with AArch64, which mandates copying the available bits into the LSBs
    1249  *    and leaving the rest unspecified.
    1250  * 5) Aligning longs and doubles is necessary on ARM only, and it's the same in registers and on
    1251  *    the stack.
    1252  * 6) There is only little endian.
   1253  *
   1254  *
   1255  * Actual work is supposed to be done in a delegate of the template type. The interface is as
   1256  * follows:
   1257  *
   1258  * void PushGpr(uintptr_t):   Add a value for the next GPR
   1259  *
    1260  * void PushFpr4(float):      Add a value for the next FPR of size 32b. Only called if we need
    1261  *                            padding, that is, think of an architecture that is 32b and aligns 64b.
   1262  *
   1263  * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
   1264  *                            split this if necessary. The current state will have aligned, if
   1265  *                            necessary.
   1266  *
   1267  * void PushStack(uintptr_t): Push a value to the stack.
   1268  *
    1269  * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_
    1270  *                                          be called with nullptr, as that might be important for null
    1271  *                                          initialization. Must return the jobject, that is, the
    1272  *                                          reference to the entry in the HandleScope (nullptr if necessary).
   1273  *
   1274  */
   1275 template<class T> class BuildNativeCallFrameStateMachine {
   1276  public:
   1277 #if defined(__arm__)
   1278   // TODO: These are all dummy values!
   1279   static constexpr bool kNativeSoftFloatAbi = true;
   1280   static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
   1281   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
   1282 
   1283   static constexpr size_t kRegistersNeededForLong = 2;
   1284   static constexpr size_t kRegistersNeededForDouble = 2;
   1285   static constexpr bool kMultiRegistersAligned = true;
   1286   static constexpr bool kMultiFPRegistersWidened = false;
   1287   static constexpr bool kMultiGPRegistersWidened = false;
   1288   static constexpr bool kAlignLongOnStack = true;
   1289   static constexpr bool kAlignDoubleOnStack = true;
   1290 #elif defined(__aarch64__)
   1291   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
    1292   static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
   1293   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
   1294 
   1295   static constexpr size_t kRegistersNeededForLong = 1;
   1296   static constexpr size_t kRegistersNeededForDouble = 1;
   1297   static constexpr bool kMultiRegistersAligned = false;
   1298   static constexpr bool kMultiFPRegistersWidened = false;
   1299   static constexpr bool kMultiGPRegistersWidened = false;
   1300   static constexpr bool kAlignLongOnStack = false;
   1301   static constexpr bool kAlignDoubleOnStack = false;
   1302 #elif defined(__mips__) && !defined(__LP64__)
    1303   static constexpr bool kNativeSoftFloatAbi = true;  // FP args are passed as if soft-float here.
   1304   static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
   1305   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
   1306 
   1307   static constexpr size_t kRegistersNeededForLong = 2;
   1308   static constexpr size_t kRegistersNeededForDouble = 2;
   1309   static constexpr bool kMultiRegistersAligned = true;
   1310   static constexpr bool kMultiFPRegistersWidened = true;
   1311   static constexpr bool kMultiGPRegistersWidened = false;
   1312   static constexpr bool kAlignLongOnStack = true;
   1313   static constexpr bool kAlignDoubleOnStack = true;
   1314 #elif defined(__mips__) && defined(__LP64__)
    1315   // Let the code prepare GPRs only; we will load the FPRs with the same data.
   1316   static constexpr bool kNativeSoftFloatAbi = true;
   1317   static constexpr size_t kNumNativeGprArgs = 8;
   1318   static constexpr size_t kNumNativeFprArgs = 0;
   1319 
   1320   static constexpr size_t kRegistersNeededForLong = 1;
   1321   static constexpr size_t kRegistersNeededForDouble = 1;
   1322   static constexpr bool kMultiRegistersAligned = false;
   1323   static constexpr bool kMultiFPRegistersWidened = false;
   1324   static constexpr bool kMultiGPRegistersWidened = true;
   1325   static constexpr bool kAlignLongOnStack = false;
   1326   static constexpr bool kAlignDoubleOnStack = false;
   1327 #elif defined(__i386__)
   1328   // TODO: Check these!
   1329   static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
    1330   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
    1331   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
   1332 
   1333   static constexpr size_t kRegistersNeededForLong = 2;
   1334   static constexpr size_t kRegistersNeededForDouble = 2;
    1335   static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyway
   1336   static constexpr bool kMultiFPRegistersWidened = false;
   1337   static constexpr bool kMultiGPRegistersWidened = false;
   1338   static constexpr bool kAlignLongOnStack = false;
   1339   static constexpr bool kAlignDoubleOnStack = false;
   1340 #elif defined(__x86_64__)
   1341   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
   1342   static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
   1343   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
   1344 
   1345   static constexpr size_t kRegistersNeededForLong = 1;
   1346   static constexpr size_t kRegistersNeededForDouble = 1;
   1347   static constexpr bool kMultiRegistersAligned = false;
   1348   static constexpr bool kMultiFPRegistersWidened = false;
   1349   static constexpr bool kMultiGPRegistersWidened = false;
   1350   static constexpr bool kAlignLongOnStack = false;
   1351   static constexpr bool kAlignDoubleOnStack = false;
   1352 #else
   1353 #error "Unsupported architecture"
   1354 #endif
   1355 
   1356  public:
   1357   explicit BuildNativeCallFrameStateMachine(T* delegate)
   1358       : gpr_index_(kNumNativeGprArgs),
   1359         fpr_index_(kNumNativeFprArgs),
   1360         stack_entries_(0),
   1361         delegate_(delegate) {
   1362     // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
   1363     // the next register is even; counting down is just to make the compiler happy...
   1364     static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
   1365     static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
   1366   }
   1367 
   1368   virtual ~BuildNativeCallFrameStateMachine() {}
   1369 
   1370   bool HavePointerGpr() const {
   1371     return gpr_index_ > 0;
   1372   }
   1373 
   1374   void AdvancePointer(const void* val) {
   1375     if (HavePointerGpr()) {
   1376       gpr_index_--;
   1377       PushGpr(reinterpret_cast<uintptr_t>(val));
   1378     } else {
   1379       stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
   1380       PushStack(reinterpret_cast<uintptr_t>(val));
   1381       gpr_index_ = 0;
   1382     }
   1383   }
   1384 
   1385   bool HaveHandleScopeGpr() const {
   1386     return gpr_index_ > 0;
   1387   }
   1388 
   1389   void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
   1390     uintptr_t handle = PushHandle(ptr);
   1391     if (HaveHandleScopeGpr()) {
   1392       gpr_index_--;
   1393       PushGpr(handle);
   1394     } else {
   1395       stack_entries_++;
   1396       PushStack(handle);
   1397       gpr_index_ = 0;
   1398     }
   1399   }
   1400 
   1401   bool HaveIntGpr() const {
   1402     return gpr_index_ > 0;
   1403   }
   1404 
   1405   void AdvanceInt(uint32_t val) {
   1406     if (HaveIntGpr()) {
   1407       gpr_index_--;
   1408       if (kMultiGPRegistersWidened) {
   1409         DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
   1410         PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
   1411       } else {
   1412         PushGpr(val);
   1413       }
   1414     } else {
   1415       stack_entries_++;
   1416       if (kMultiGPRegistersWidened) {
   1417         DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
   1418         PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
   1419       } else {
   1420         PushStack(val);
   1421       }
   1422       gpr_index_ = 0;
   1423     }
   1424   }
   1425 
   1426   bool HaveLongGpr() const {
   1427     return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
   1428   }
   1429 
   1430   bool LongGprNeedsPadding() const {
   1431     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1432         kAlignLongOnStack &&                  // and when it needs alignment
   1433         (gpr_index_ & 1) == 1;                // counter is odd, see constructor
   1434   }
   1435 
   1436   bool LongStackNeedsPadding() const {
   1437     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1438         kAlignLongOnStack &&                  // and when it needs 8B alignment
   1439         (stack_entries_ & 1) == 1;            // counter is odd
   1440   }
   1441 
   1442   void AdvanceLong(uint64_t val) {
   1443     if (HaveLongGpr()) {
   1444       if (LongGprNeedsPadding()) {
   1445         PushGpr(0);
   1446         gpr_index_--;
   1447       }
   1448       if (kRegistersNeededForLong == 1) {
   1449         PushGpr(static_cast<uintptr_t>(val));
   1450       } else {
   1451         PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1452         PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1453       }
   1454       gpr_index_ -= kRegistersNeededForLong;
   1455     } else {
   1456       if (LongStackNeedsPadding()) {
   1457         PushStack(0);
   1458         stack_entries_++;
   1459       }
   1460       if (kRegistersNeededForLong == 1) {
   1461         PushStack(static_cast<uintptr_t>(val));
   1462         stack_entries_++;
   1463       } else {
   1464         PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1465         PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1466         stack_entries_ += 2;
   1467       }
   1468       gpr_index_ = 0;
   1469     }
   1470   }
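           // Worked example (32-bit ARM above: kNumNativeGprArgs = 4, kAlignLongOnStack = true): after a
           // single pointer argument has been pushed, gpr_index_ == 3 (odd), so the next free register
           // is the odd-numbered r1; LongGprNeedsPadding() is then true, a 0 is pushed to skip r1, and
           // the long's two halves land in r2/r3, leaving gpr_index_ == 0.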
   1471 
   1472   bool HaveFloatFpr() const {
   1473     return fpr_index_ > 0;
   1474   }
   1475 
   1476   void AdvanceFloat(float val) {
   1477     if (kNativeSoftFloatAbi) {
   1478       AdvanceInt(bit_cast<uint32_t, float>(val));
   1479     } else {
   1480       if (HaveFloatFpr()) {
   1481         fpr_index_--;
   1482         if (kRegistersNeededForDouble == 1) {
   1483           if (kMultiFPRegistersWidened) {
   1484             PushFpr8(bit_cast<uint64_t, double>(val));
   1485           } else {
   1486             // No widening, just use the bits.
   1487             PushFpr8(static_cast<uint64_t>(bit_cast<uint32_t, float>(val)));
   1488           }
   1489         } else {
   1490           PushFpr4(val);
   1491         }
   1492       } else {
   1493         stack_entries_++;
   1494         if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
   1495           // Need to widen before storing: Note the "double" in the template instantiation.
   1496           // Note: We need to jump through those hoops to make the compiler happy.
   1497           DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
   1498           PushStack(static_cast<uintptr_t>(bit_cast<uint64_t, double>(val)));
   1499         } else {
   1500           PushStack(static_cast<uintptr_t>(bit_cast<uint32_t, float>(val)));
   1501         }
   1502         fpr_index_ = 0;
   1503       }
   1504     }
   1505   }
   1506 
   1507   bool HaveDoubleFpr() const {
   1508     return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
   1509   }
   1510 
   1511   bool DoubleFprNeedsPadding() const {
   1512     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1513         kAlignDoubleOnStack &&                  // and when it needs alignment
   1514         (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
   1515   }
   1516 
   1517   bool DoubleStackNeedsPadding() const {
   1518     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1519         kAlignDoubleOnStack &&                  // and when it needs 8B alignment
   1520         (stack_entries_ & 1) == 1;              // counter is odd
   1521   }
   1522 
   1523   void AdvanceDouble(uint64_t val) {
   1524     if (kNativeSoftFloatAbi) {
   1525       AdvanceLong(val);
   1526     } else {
   1527       if (HaveDoubleFpr()) {
   1528         if (DoubleFprNeedsPadding()) {
   1529           PushFpr4(0);
   1530           fpr_index_--;
   1531         }
   1532         PushFpr8(val);
   1533         fpr_index_ -= kRegistersNeededForDouble;
   1534       } else {
   1535         if (DoubleStackNeedsPadding()) {
   1536           PushStack(0);
   1537           stack_entries_++;
   1538         }
   1539         if (kRegistersNeededForDouble == 1) {
   1540           PushStack(static_cast<uintptr_t>(val));
   1541           stack_entries_++;
   1542         } else {
   1543           PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1544           PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1545           stack_entries_ += 2;
   1546         }
   1547         fpr_index_ = 0;
   1548       }
   1549     }
   1550   }
   1551 
   1552   uint32_t GetStackEntries() const {
   1553     return stack_entries_;
   1554   }
   1555 
   1556   uint32_t GetNumberOfUsedGprs() const {
   1557     return kNumNativeGprArgs - gpr_index_;
   1558   }
   1559 
   1560   uint32_t GetNumberOfUsedFprs() const {
   1561     return kNumNativeFprArgs - fpr_index_;
   1562   }
   1563 
   1564  private:
   1565   void PushGpr(uintptr_t val) {
   1566     delegate_->PushGpr(val);
   1567   }
   1568   void PushFpr4(float val) {
   1569     delegate_->PushFpr4(val);
   1570   }
   1571   void PushFpr8(uint64_t val) {
   1572     delegate_->PushFpr8(val);
   1573   }
   1574   void PushStack(uintptr_t val) {
   1575     delegate_->PushStack(val);
   1576   }
   1577   uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
   1578     return delegate_->PushHandle(ref);
   1579   }
   1580 
   1581   uint32_t gpr_index_;      // Number of free GPRs
   1582   uint32_t fpr_index_;      // Number of free FPRs
   1583   uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
   1584                             // extended
   1585   T* const delegate_;             // What Push implementation gets called
   1586 };
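         // A minimal sketch of a delegate satisfying the interface described in the comment above the
         // class; the name TallyingDelegate and its counters are illustrative assumptions only
         // (ComputeNativeCallFrameSize below is the real counting delegate used by the runtime).
         #if 0
         class TallyingDelegate {
          public:
           void PushGpr(uintptr_t) { gprs_++; }
           void PushFpr4(float) { fprs_++; }
           void PushFpr8(uint64_t) { fprs_++; }
           void PushStack(uintptr_t) { stack_++; }
           uintptr_t PushHandle(mirror::Object*) { handles_++; return 0u; }

           size_t gprs_ = 0, fprs_ = 0, stack_ = 0, handles_ = 0;
         };
         // Usage sketch (env, receiver, j and i are hypothetical values):
         //   TallyingDelegate delegate;
         //   BuildNativeCallFrameStateMachine<TallyingDelegate> sm(&delegate);
         //   sm.AdvancePointer(env); sm.AdvanceHandleScope(receiver); sm.AdvanceLong(j); sm.AdvanceInt(i);
         #endif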
   1587 
   1588 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
   1589 // in subclasses.
   1590 //
   1591 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
   1592 // them with handles.
   1593 class ComputeNativeCallFrameSize {
   1594  public:
   1595   ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
   1596 
   1597   virtual ~ComputeNativeCallFrameSize() {}
   1598 
   1599   uint32_t GetStackSize() const {
   1600     return num_stack_entries_ * sizeof(uintptr_t);
   1601   }
   1602 
   1603   uint8_t* LayoutCallStack(uint8_t* sp8) const {
   1604     sp8 -= GetStackSize();
   1605     // Align by kStackAlignment.
   1606     sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1607     return sp8;
   1608   }
   1609 
   1610   uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
   1611       const {
   1612     // Assumption is OK right now, as we have soft-float arm
   1613     size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
   1614     sp8 -= fregs * sizeof(uintptr_t);
   1615     *start_fpr = reinterpret_cast<uint32_t*>(sp8);
   1616     size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
   1617     sp8 -= iregs * sizeof(uintptr_t);
   1618     *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
   1619     return sp8;
   1620   }
   1621 
   1622   uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
   1623                             uint32_t** start_fpr) const {
   1624     // Native call stack.
   1625     sp8 = LayoutCallStack(sp8);
   1626     *start_stack = reinterpret_cast<uintptr_t*>(sp8);
   1627 
   1628     // Put fprs and gprs below.
   1629     sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
   1630 
   1631     // Return the new bottom.
   1632     return sp8;
   1633   }
   1634 
   1635   virtual void WalkHeader(
   1636       BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
   1637       REQUIRES_SHARED(Locks::mutator_lock_) {
   1638   }
   1639 
   1640   void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
   1641     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
   1642 
   1643     WalkHeader(&sm);
   1644 
   1645     for (uint32_t i = 1; i < shorty_len; ++i) {
   1646       Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
   1647       switch (cur_type_) {
   1648         case Primitive::kPrimNot:
   1649           // TODO: fix abuse of mirror types.
   1650           sm.AdvanceHandleScope(
   1651               reinterpret_cast<mirror::Object*>(0x12345678));
   1652           break;
   1653 
   1654         case Primitive::kPrimBoolean:
   1655         case Primitive::kPrimByte:
   1656         case Primitive::kPrimChar:
   1657         case Primitive::kPrimShort:
   1658         case Primitive::kPrimInt:
   1659           sm.AdvanceInt(0);
   1660           break;
   1661         case Primitive::kPrimFloat:
   1662           sm.AdvanceFloat(0);
   1663           break;
   1664         case Primitive::kPrimDouble:
   1665           sm.AdvanceDouble(0);
   1666           break;
   1667         case Primitive::kPrimLong:
   1668           sm.AdvanceLong(0);
   1669           break;
   1670         default:
   1671           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
   1672           UNREACHABLE();
   1673       }
   1674     }
   1675 
   1676     num_stack_entries_ = sm.GetStackEntries();
   1677   }
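           // For example, the shorty "DLJF" (a method returning double and taking (Object, long, float))
           // results in AdvanceHandleScope, AdvanceLong and AdvanceFloat; the leading 'D' is the return
           // type and is skipped because the loop starts at i = 1.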
   1678 
   1679   void PushGpr(uintptr_t /* val */) {
   1680     // not optimizing registers, yet
   1681   }
   1682 
   1683   void PushFpr4(float /* val */) {
   1684     // not optimizing registers, yet
   1685   }
   1686 
   1687   void PushFpr8(uint64_t /* val */) {
   1688     // not optimizing registers, yet
   1689   }
   1690 
   1691   void PushStack(uintptr_t /* val */) {
   1692     // counting is already done in the superclass
   1693   }
   1694 
   1695   virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
   1696     return reinterpret_cast<uintptr_t>(nullptr);
   1697   }
   1698 
   1699  protected:
   1700   uint32_t num_stack_entries_;
   1701 };
   1702 
   1703 class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
   1704  public:
   1705   explicit ComputeGenericJniFrameSize(bool critical_native)
   1706     : num_handle_scope_references_(0), critical_native_(critical_native) {}
   1707 
    1708   // Lays out the callee-save frame. Assumes that the not-yet-fixed-up frame corresponding to
    1709   // RefsAndArgs is at *m = sp. Will update *m to point to the bottom of the save frame.
   1710   //
   1711   // Note: assumes ComputeAll() has been run before.
   1712   void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
   1713       REQUIRES_SHARED(Locks::mutator_lock_) {
   1714     ArtMethod* method = **m;
   1715 
   1716     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
   1717 
   1718     uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
   1719 
   1720     // First, fix up the layout of the callee-save frame.
   1721     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1722 
   1723     // "Free" the slot for the method.
   1724     sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
   1725 
   1726     // Under the callee saves put handle scope and new method stack reference.
   1727     size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
   1728     size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
   1729 
   1730     sp8 -= scope_and_method;
   1731     // Align by kStackAlignment.
   1732     sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1733 
   1734     uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
   1735     *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
   1736                                         num_handle_scope_references_);
   1737 
   1738     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
   1739     uint8_t* method_pointer = sp8;
   1740     auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
   1741     *new_method_ref = method;
   1742     *m = new_method_ref;
   1743   }
   1744 
   1745   // Adds space for the cookie. Note: may leave stack unaligned.
   1746   void LayoutCookie(uint8_t** sp) const {
   1747     // Reference cookie and padding
   1748     *sp -= 8;
   1749   }
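           // Of the 8 bytes, the 4 bytes directly below the relocated ArtMethod* hold the saved local
           // reference cookie (stored later via `*(sp32 - 1)` in artQuickGenericJniTrampoline); the other
           // 4 bytes are presumably padding to keep the region pointer-aligned on 64-bit targets.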
   1750 
   1751   // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
   1752   // Returns the new bottom. Note: this may be unaligned.
   1753   uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
   1754       REQUIRES_SHARED(Locks::mutator_lock_) {
   1755     // First, fix up the layout of the callee-save frame.
   1756     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1757     LayoutCalleeSaveFrame(self, m, sp, handle_scope);
   1758 
   1759     // The bottom of the callee-save frame is now where the method is, *m.
   1760     uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
   1761 
   1762     // Add space for cookie.
   1763     LayoutCookie(&sp8);
   1764 
   1765     return sp8;
   1766   }
   1767 
   1768   // WARNING: After this, *sp won't be pointing to the method anymore!
   1769   uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
   1770                          HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
   1771                          uint32_t** start_fpr)
   1772       REQUIRES_SHARED(Locks::mutator_lock_) {
   1773     Walk(shorty, shorty_len);
   1774 
   1775     // JNI part.
   1776     uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
   1777 
   1778     sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
   1779 
   1780     // Return the new bottom.
   1781     return sp8;
   1782   }
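           // After ComputeLayout the region below the fixed-up callee-save frame looks roughly like this
           // (higher addresses at the top):
           //
           //   | HandleScope                        |
           //   | ArtMethod* (new location, == *m)   |
           //   | cookie + padding (8 bytes)         |
           //   | native call stack arguments        |  <- *start_stack (base, after alignment)
           //   | FPR scratch area                   |  <- *start_fpr
           //   | GPR scratch area                   |  <- *start_gpr == returned bottom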
   1783 
   1784   uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
   1785 
   1786   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
   1787   void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
   1788       REQUIRES_SHARED(Locks::mutator_lock_);
   1789 
   1790  private:
   1791   uint32_t num_handle_scope_references_;
   1792   const bool critical_native_;
   1793 };
   1794 
   1795 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
   1796   num_handle_scope_references_++;
   1797   return reinterpret_cast<uintptr_t>(nullptr);
   1798 }
   1799 
   1800 void ComputeGenericJniFrameSize::WalkHeader(
   1801     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
   1802   // First 2 parameters are always excluded for @CriticalNative.
   1803   if (UNLIKELY(critical_native_)) {
   1804     return;
   1805   }
   1806 
   1807   // JNIEnv
   1808   sm->AdvancePointer(nullptr);
   1809 
   1810   // Class object or this as first argument
   1811   sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
   1812 }
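         // For example, a static `native int foo(int)` is invoked natively as
         // `jint foo(JNIEnv*, jclass, jint)`, so the header accounts for the JNIEnv* (AdvancePointer)
         // and the jclass (AdvanceHandleScope) before the shorty-derived jint; for an instance method
         // the second slot is the `this` jobject instead. @CriticalNative methods receive neither.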
   1813 
   1814 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
    1815 // the template requirements of BuildNativeCallFrameStateMachine.
   1816 class FillNativeCall {
   1817  public:
   1818   FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
   1819       cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
   1820 
   1821   virtual ~FillNativeCall() {}
   1822 
   1823   void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
   1824     cur_gpr_reg_ = gpr_regs;
   1825     cur_fpr_reg_ = fpr_regs;
   1826     cur_stack_arg_ = stack_args;
   1827   }
   1828 
   1829   void PushGpr(uintptr_t val) {
   1830     *cur_gpr_reg_ = val;
   1831     cur_gpr_reg_++;
   1832   }
   1833 
   1834   void PushFpr4(float val) {
   1835     *cur_fpr_reg_ = val;
   1836     cur_fpr_reg_++;
   1837   }
   1838 
   1839   void PushFpr8(uint64_t val) {
   1840     uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
   1841     *tmp = val;
   1842     cur_fpr_reg_ += 2;
   1843   }
   1844 
   1845   void PushStack(uintptr_t val) {
   1846     *cur_stack_arg_ = val;
   1847     cur_stack_arg_++;
   1848   }
   1849 
   1850   virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
   1851     LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
   1852     UNREACHABLE();
   1853   }
   1854 
   1855  private:
   1856   uintptr_t* cur_gpr_reg_;
   1857   uint32_t* cur_fpr_reg_;
   1858   uintptr_t* cur_stack_arg_;
   1859 };
   1860 
    1861 // Visits arguments on the stack, placing them into a region lower down the stack, for the benefit
    1862 // of transitioning into native code.
   1863 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
   1864  public:
   1865   BuildGenericJniFrameVisitor(Thread* self,
   1866                               bool is_static,
   1867                               bool critical_native,
   1868                               const char* shorty,
   1869                               uint32_t shorty_len,
   1870                               ArtMethod*** sp)
   1871      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
   1872        jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
   1873        sm_(&jni_call_) {
   1874     ComputeGenericJniFrameSize fsc(critical_native);
   1875     uintptr_t* start_gpr_reg;
   1876     uint32_t* start_fpr_reg;
   1877     uintptr_t* start_stack_arg;
   1878     bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
   1879                                              &handle_scope_,
   1880                                              &start_stack_arg,
   1881                                              &start_gpr_reg, &start_fpr_reg);
   1882 
   1883     jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
   1884 
   1885     // First 2 parameters are always excluded for CriticalNative methods.
   1886     if (LIKELY(!critical_native)) {
   1887       // jni environment is always first argument
   1888       sm_.AdvancePointer(self->GetJniEnv());
   1889 
   1890       if (is_static) {
   1891         sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
   1892       }  // else "this" reference is already handled by QuickArgumentVisitor.
   1893     }
   1894   }
   1895 
   1896   void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
   1897 
   1898   void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
   1899 
   1900   StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
   1901     return handle_scope_->GetHandle(0).GetReference();
   1902   }
   1903 
   1904   jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
   1905     return handle_scope_->GetHandle(0).ToJObject();
   1906   }
   1907 
   1908   void* GetBottomOfUsedArea() const {
   1909     return bottom_of_used_area_;
   1910   }
   1911 
   1912  private:
   1913   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
   1914   class FillJniCall FINAL : public FillNativeCall {
   1915    public:
   1916     FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
   1917                 HandleScope* handle_scope, bool critical_native)
   1918       : FillNativeCall(gpr_regs, fpr_regs, stack_args),
   1919                        handle_scope_(handle_scope),
   1920         cur_entry_(0),
   1921         critical_native_(critical_native) {}
   1922 
   1923     uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
   1924 
   1925     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
   1926       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
   1927       handle_scope_ = scope;
   1928       cur_entry_ = 0U;
   1929     }
   1930 
   1931     void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
   1932       // Initialize padding entries.
   1933       size_t expected_slots = handle_scope_->NumberOfReferences();
   1934       while (cur_entry_ < expected_slots) {
   1935         handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
   1936       }
   1937 
   1938       if (!critical_native_) {
   1939         // Non-critical natives have at least the self class (jclass) or this (jobject).
   1940         DCHECK_NE(cur_entry_, 0U);
   1941       }
   1942     }
   1943 
   1944     bool CriticalNative() const {
   1945       return critical_native_;
   1946     }
   1947 
   1948    private:
   1949     HandleScope* handle_scope_;
   1950     size_t cur_entry_;
   1951     const bool critical_native_;
   1952   };
   1953 
   1954   HandleScope* handle_scope_;
   1955   FillJniCall jni_call_;
   1956   void* bottom_of_used_area_;
   1957 
   1958   BuildNativeCallFrameStateMachine<FillJniCall> sm_;
   1959 
   1960   DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
   1961 };
   1962 
   1963 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
   1964   uintptr_t tmp;
   1965   MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
   1966   h.Assign(ref);
   1967   tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
   1968   cur_entry_++;
   1969   return tmp;
   1970 }
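         // The returned uintptr_t is what the native code receives as the jobject for this reference
         // argument: the address of the HandleScope slot now holding `ref` (h.ToJObject()), i.e. an
         // indirect reference rather than a raw mirror::Object*.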
   1971 
   1972 void BuildGenericJniFrameVisitor::Visit() {
   1973   Primitive::Type type = GetParamPrimitiveType();
   1974   switch (type) {
   1975     case Primitive::kPrimLong: {
   1976       jlong long_arg;
   1977       if (IsSplitLongOrDouble()) {
   1978         long_arg = ReadSplitLongParam();
   1979       } else {
   1980         long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
   1981       }
   1982       sm_.AdvanceLong(long_arg);
   1983       break;
   1984     }
   1985     case Primitive::kPrimDouble: {
   1986       uint64_t double_arg;
   1987       if (IsSplitLongOrDouble()) {
    1988         // Read the raw bits into a uint64_t so that we don't cast to a double.
   1989         double_arg = ReadSplitLongParam();
   1990       } else {
   1991         double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
   1992       }
   1993       sm_.AdvanceDouble(double_arg);
   1994       break;
   1995     }
   1996     case Primitive::kPrimNot: {
   1997       StackReference<mirror::Object>* stack_ref =
   1998           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
   1999       sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
   2000       break;
   2001     }
   2002     case Primitive::kPrimFloat:
   2003       sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
   2004       break;
   2005     case Primitive::kPrimBoolean:  // Fall-through.
   2006     case Primitive::kPrimByte:     // Fall-through.
   2007     case Primitive::kPrimChar:     // Fall-through.
   2008     case Primitive::kPrimShort:    // Fall-through.
   2009     case Primitive::kPrimInt:      // Fall-through.
   2010       sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
   2011       break;
   2012     case Primitive::kPrimVoid:
   2013       LOG(FATAL) << "UNREACHABLE";
   2014       UNREACHABLE();
   2015   }
   2016 }
   2017 
   2018 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
   2019   // Clear out rest of the scope.
   2020   jni_call_.ResetRemainingScopeSlots();
   2021   if (!jni_call_.CriticalNative()) {
   2022     // Install HandleScope.
   2023     self->PushHandleScope(handle_scope_);
   2024   }
   2025 }
   2026 
   2027 #if defined(__arm__) || defined(__aarch64__)
   2028 extern "C" const void* artFindNativeMethod();
   2029 #else
   2030 extern "C" const void* artFindNativeMethod(Thread* self);
   2031 #endif
   2032 
   2033 static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
   2034                                             uint32_t cookie,
   2035                                             bool fast_native ATTRIBUTE_UNUSED,
   2036                                             jobject l,
   2037                                             jobject lock) {
   2038   // TODO: add entrypoints for @FastNative returning objects.
   2039   if (lock != nullptr) {
   2040     return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
   2041   } else {
   2042     return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
   2043   }
   2044 }
   2045 
   2046 static void artQuickGenericJniEndJNINonRef(Thread* self,
   2047                                            uint32_t cookie,
   2048                                            bool fast_native,
   2049                                            jobject lock) {
   2050   if (lock != nullptr) {
   2051     JniMethodEndSynchronized(cookie, lock, self);
   2052     // Ignore "fast_native" here because synchronized functions aren't very fast.
   2053   } else {
   2054     if (UNLIKELY(fast_native)) {
   2055       JniMethodFastEnd(cookie, self);
   2056     } else {
   2057       JniMethodEnd(cookie, self);
   2058     }
   2059   }
   2060 }
   2061 
   2062 /*
   2063  * Initializes an alloca region assumed to be directly below sp for a native call:
    2064  * Creates a HandleScope and a call stack, and fills a mini stack with values to be pushed to registers.
   2065  * The final element on the stack is a pointer to the native code.
   2066  *
   2067  * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
   2068  * We need to fix this, as the handle scope needs to go into the callee-save frame.
   2069  *
    2070  * The return of this function is a two-word value: the bottom of the used alloca area (so the
    2071  * caller can release the rest) and the native code pointer to invoke, or a failure value if an
    2072  * exception is pending.
   2073  */
   2074 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
   2075     REQUIRES_SHARED(Locks::mutator_lock_) {
   2076   ArtMethod* called = *sp;
   2077   DCHECK(called->IsNative()) << called->PrettyMethod(true);
   2078   // Fix up a callee-save frame at the bottom of the stack (at `*sp`,
   2079   // above the alloca region) while we check for optimization
   2080   // annotations, thus allowing stack walking until the completion of
   2081   // the JNI frame creation.
   2082   //
   2083   // Note however that the Generic JNI trampoline does not expect
    2084   // an exception to be thrown at that stage.
   2085   *sp = Runtime::Current()->GetCalleeSaveMethod(Runtime::CalleeSaveType::kSaveRefsAndArgs);
   2086   self->SetTopOfStack(sp);
   2087   uint32_t shorty_len = 0;
   2088   const char* shorty = called->GetShorty(&shorty_len);
   2089   // Optimization annotations lookup does not try to resolve classes,
   2090   // as this may throw an exception, which is not supported by the
   2091   // Generic JNI trampoline at this stage; instead, method's
   2092   // annotations' classes are looked up in the bootstrap class
   2093   // loader's resolved types (which won't trigger an exception).
   2094   bool critical_native = called->IsAnnotatedWithCriticalNative();
   2095   // ArtMethod::IsAnnotatedWithCriticalNative should not throw
   2096   // an exception; clear it if it happened anyway.
   2097   // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()).
   2098   if (self->IsExceptionPending()) {
   2099     self->ClearException();
   2100   }
   2101   bool fast_native = called->IsAnnotatedWithFastNative();
   2102   // ArtMethod::IsAnnotatedWithFastNative should not throw
   2103   // an exception; clear it if it happened anyway.
   2104   // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()).
   2105   if (self->IsExceptionPending()) {
   2106     self->ClearException();
   2107   }
   2108   bool normal_native = !critical_native && !fast_native;
   2109   // Restore the initial ArtMethod pointer at `*sp`.
   2110   *sp = called;
   2111 
   2112   // Run the visitor and update sp.
   2113   BuildGenericJniFrameVisitor visitor(self,
   2114                                       called->IsStatic(),
   2115                                       critical_native,
   2116                                       shorty,
   2117                                       shorty_len,
   2118                                       &sp);
   2119   {
   2120     ScopedAssertNoThreadSuspension sants(__FUNCTION__);
   2121     visitor.VisitArguments();
   2122     // FinalizeHandleScope pushes the handle scope on the thread.
   2123     visitor.FinalizeHandleScope(self);
   2124   }
   2125 
   2126   // Fix up managed-stack things in Thread.
   2127   self->SetTopOfStack(sp);
   2128 
   2129   self->VerifyStack();
   2130 
   2131   uint32_t cookie;
   2132   uint32_t* sp32;
   2133   // Skip calling JniMethodStart for @CriticalNative.
   2134   if (LIKELY(!critical_native)) {
   2135     // Start JNI, save the cookie.
   2136     if (called->IsSynchronized()) {
    2137       DCHECK(normal_native) << " @FastNative with synchronized is not supported";
   2138       cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
   2139       if (self->IsExceptionPending()) {
   2140         self->PopHandleScope();
   2141         // A negative value denotes an error.
   2142         return GetTwoWordFailureValue();
   2143       }
   2144     } else {
   2145       if (fast_native) {
   2146         cookie = JniMethodFastStart(self);
   2147       } else {
   2148         DCHECK(normal_native);
   2149         cookie = JniMethodStart(self);
   2150       }
   2151     }
   2152     sp32 = reinterpret_cast<uint32_t*>(sp);
   2153     *(sp32 - 1) = cookie;
   2154   }
   2155 
   2156   // Retrieve the stored native code.
   2157   void const* nativeCode = called->GetEntryPointFromJni();
   2158 
   2159   // There are two cases for the content of nativeCode:
   2160   // 1) Pointer to the native function.
   2161   // 2) Pointer to the trampoline for native code binding.
   2162   // In the second case, we need to execute the binding and continue with the actual native function
   2163   // pointer.
   2164   DCHECK(nativeCode != nullptr);
   2165   if (nativeCode == GetJniDlsymLookupStub()) {
   2166 #if defined(__arm__) || defined(__aarch64__)
   2167     nativeCode = artFindNativeMethod();
   2168 #else
   2169     nativeCode = artFindNativeMethod(self);
   2170 #endif
   2171 
   2172     if (nativeCode == nullptr) {
   2173       DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
   2174 
   2175       // @CriticalNative calls do not need to call back into JniMethodEnd.
   2176       if (LIKELY(!critical_native)) {
   2177         // End JNI, as the assembly will move to deliver the exception.
   2178         jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
   2179         if (shorty[0] == 'L') {
   2180           artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
   2181         } else {
   2182           artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
   2183         }
   2184       }
   2185 
   2186       return GetTwoWordFailureValue();
   2187     }
   2188     // Note that the native code pointer will be automatically set by artFindNativeMethod().
   2189   }
   2190 
   2191 #if defined(__mips__) && !defined(__LP64__)
   2192   // On MIPS32 if the first two arguments are floating-point, we need to know their types
   2193   // so that art_quick_generic_jni_trampoline can correctly extract them from the stack
   2194   // and load into floating-point registers.
   2195   // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU
   2196   // view):
   2197   // (1)
   2198   //  |     DOUBLE    |     DOUBLE    | other args, if any
   2199   //  |  F12  |  F13  |  F14  |  F15  |
   2200   //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
   2201   // (2)
   2202   //  |     DOUBLE    | FLOAT | (PAD) | other args, if any
   2203   //  |  F12  |  F13  |  F14  |       |
   2204   //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
   2205   // (3)
   2206   //  | FLOAT | (PAD) |     DOUBLE    | other args, if any
   2207   //  |  F12  |       |  F14  |  F15  |
   2208   //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
   2209   // (4)
   2210   //  | FLOAT | FLOAT | other args, if any
   2211   //  |  F12  |  F14  |
   2212   //  |  SP+0 |  SP+4 | SP+8
   2213   // As you can see, only the last case (4) is special. In all others we can just
   2214   // load F12/F13 and F14/F15 in the same manner.
   2215   // Set bit 0 of the native code address to 1 in this case (valid code addresses
   2216   // are always a multiple of 4 on MIPS32, so we have 2 spare bits available).
   2217   if (nativeCode != nullptr &&
   2218       shorty != nullptr &&
   2219       shorty_len >= 3 &&
   2220       shorty[1] == 'F' &&
   2221       shorty[2] == 'F') {
   2222     nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1);
   2223   }
   2224 #endif
   2225 
   2226   // Return native code addr(lo) and bottom of alloca address(hi).
   2227   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
   2228                                 reinterpret_cast<uintptr_t>(nativeCode));
   2229 }
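         // Conceptually (the real logic lives in the art_quick_generic_jni_trampoline assembly stubs),
         // the caller of this function then does roughly the following with the two returned words, the
         // bottom of the used alloca area and the native code pointer: on failure it delivers the
         // pending exception; otherwise it releases the unused part of the alloca, loads the outgoing
         // arguments from the GPR/FPR/stack regions prepared above, calls the native code, and finally
         // calls artQuickGenericJniEndTrampoline with the native result.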
   2230 
   2231 // Defined in quick_jni_entrypoints.cc.
   2232 extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
   2233                                     jvalue result, uint64_t result_f, ArtMethod* called,
   2234                                     HandleScope* handle_scope);
   2235 /*
   2236  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
   2237  * unlocking.
   2238  */
   2239 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
   2240                                                     jvalue result,
   2241                                                     uint64_t result_f) {
    2242   // We're here just back from a native call. We don't hold the shared mutator lock at this point,
    2243   // and won't until GoToRunnable() is called later in GenericJniMethodEnd(). Accessing objects or doing
   2244   // anything that requires a mutator lock before that would cause problems as GC may have the
   2245   // exclusive mutator lock and may be moving objects, etc.
   2246   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
   2247   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   2248   ArtMethod* called = *sp;
   2249   uint32_t cookie = *(sp32 - 1);
   2250   HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
   2251   return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
   2252 }
   2253 
   2254 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
   2255 // for the method pointer.
   2256 //
   2257 // It is valid to use this, as at the usage points here (returns from C functions) we are assuming
   2258 // to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
   2259 
   2260 template<InvokeType type, bool access_check>
   2261 static TwoWordReturn artInvokeCommon(uint32_t method_idx,
   2262                                      ObjPtr<mirror::Object> this_object,
   2263                                      Thread* self,
   2264                                      ArtMethod** sp) {
   2265   ScopedQuickEntrypointChecks sqec(self);
   2266   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
   2267   ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
   2268   ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
   2269   if (UNLIKELY(method == nullptr)) {
   2270     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
   2271     uint32_t shorty_len;
   2272     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
   2273     {
   2274       // Remember the args in case a GC happens in FindMethodFromCode.
   2275       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   2276       RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
   2277       visitor.VisitArguments();
   2278       method = FindMethodFromCode<type, access_check>(method_idx,
   2279                                                       &this_object,
   2280                                                       caller_method,
   2281                                                       self);
   2282       visitor.FixupReferences();
   2283     }
   2284 
   2285     if (UNLIKELY(method == nullptr)) {
   2286       CHECK(self->IsExceptionPending());
   2287       return GetTwoWordFailureValue();  // Failure.
   2288     }
   2289   }
   2290   DCHECK(!self->IsExceptionPending());
   2291   const void* code = method->GetEntryPointFromQuickCompiledCode();
   2292 
   2293   // When we return, the caller will branch to this address, so it had better not be 0!
   2294   DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
   2295                           << " location: "
   2296                           << method->GetDexFile()->GetLocation();
   2297 
   2298   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   2299                                 reinterpret_cast<uintptr_t>(method));
   2300 }
   2301 
   2302 // Explicit artInvokeCommon template function declarations to please analysis tool.
   2303 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
   2304   template REQUIRES_SHARED(Locks::mutator_lock_)                                          \
   2305   TwoWordReturn artInvokeCommon<type, access_check>(                                            \
    2306       uint32_t method_idx, ObjPtr<mirror::Object> this_object, Thread* self, ArtMethod** sp)
   2307 
   2308 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
   2309 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
   2310 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
   2311 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
   2312 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
   2313 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
   2314 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
   2315 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
   2316 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
   2317 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
   2318 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
   2319 
   2320 // See comments in runtime_support_asm.S
   2321 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
   2322     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
   2323     REQUIRES_SHARED(Locks::mutator_lock_) {
   2324   return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
   2325 }
   2326 
   2327 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
   2328     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
   2329     REQUIRES_SHARED(Locks::mutator_lock_) {
   2330   return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
   2331 }
   2332 
   2333 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
   2334     uint32_t method_idx,
   2335     mirror::Object* this_object ATTRIBUTE_UNUSED,
   2336     Thread* self,
   2337     ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
   2338   // For static calls, this_object is not required and may be random garbage. Don't pass it
   2339   // down, so that it does not trip the ObjPtr alignment check.
   2340   return artInvokeCommon<kStatic, true>(method_idx, nullptr, self, sp);
   2341 }
   2342 
   2343 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
   2344     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
   2345     REQUIRES_SHARED(Locks::mutator_lock_) {
   2346   return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
   2347 }
   2348 
   2349 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
   2350     uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
   2351     REQUIRES_SHARED(Locks::mutator_lock_) {
   2352   return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
   2353 }
   2354 
   2355 // Determine target of interface dispatch. The interface method and this object are known non-null.
   2356 // The interface method is the method returned by the dex cache in the conflict trampoline.
   2357 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
   2358                                                       mirror::Object* raw_this_object,
   2359                                                       Thread* self,
   2360                                                       ArtMethod** sp)
   2361     REQUIRES_SHARED(Locks::mutator_lock_) {
   2362   CHECK(interface_method != nullptr);
   2363   ObjPtr<mirror::Object> this_object(raw_this_object);
   2364   ScopedQuickEntrypointChecks sqec(self);
   2365   StackHandleScope<1> hs(self);
   2366   Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
   2367 
   2368   ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
   2369   ArtMethod* method = nullptr;
   2370   ImTable* imt = cls->GetImt(kRuntimePointerSize);
   2371 
   2372   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
   2373     // If the interface method is already resolved, check whether we have a match in the
   2374     // ImtConflictTable.
   2375     ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
   2376                                           kRuntimePointerSize);
   2377     if (LIKELY(conflict_method->IsRuntimeMethod())) {
   2378       ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
   2379       DCHECK(current_table != nullptr);
   2380       method = current_table->Lookup(interface_method, kRuntimePointerSize);
   2381     } else {
   2382       // The IMT slot holds a real method rather than a conflict method; resolve through the class.
   2383       method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
   2384     }
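            // Conceptually (a sketch; see imt_conflict_table.h for the real layout), the conflict
            // table behind a runtime method is a flat, null-terminated list of
            // { interface_method, implementation } pairs, and the Lookup() above is a linear scan:
            //
            //   for (size_t i = 0; table->GetInterfaceMethod(i, size) != nullptr; ++i) {
            //     if (table->GetInterfaceMethod(i, size) == interface_method) {
            //       return table->GetImplementationMethod(i, size);
            //     }
            //   }
            //   return nullptr;  // Not in the table; fall back to the IfTable lookup below.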
   2385     if (method != nullptr) {
   2386       return GetTwoWordSuccessValue(
   2387           reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode()),
   2388           reinterpret_cast<uintptr_t>(method));
   2389     }
   2390 
   2391     // No match, use the IfTable.
   2392     method = cls->FindVirtualMethodForInterface(interface_method, kRuntimePointerSize);
   2393     if (UNLIKELY(method == nullptr)) {
   2394       ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
   2395           interface_method, this_object, caller_method);
   2396       return GetTwoWordFailureValue();  // Failure.
   2397     }
   2398   } else {
   2399     // The interface method is unresolved, so look it up in the dex file of the caller.
   2400     DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
   2401 
   2402     // Fetch the dex_method_idx of the target interface method from the caller.
   2403     uint32_t dex_method_idx;
   2404     uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
   2405     const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
   2406     DCHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
   2407     const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
   2408     Instruction::Code instr_code = instr->Opcode();
   2409     DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
   2410            instr_code == Instruction::INVOKE_INTERFACE_RANGE)
   2411         << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
   2412     if (instr_code == Instruction::INVOKE_INTERFACE) {
   2413       dex_method_idx = instr->VRegB_35c();
   2414     } else {
   2415       DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
   2416       dex_method_idx = instr->VRegB_3rc();
   2417     }
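            // For reference (Dalvik bytecode formats, sketched from the instruction docs):
            // invoke-interface uses format 35c ("A|G|op BBBB F|E|D|C") and invoke-interface/range
            // uses format 3rc ("AA|op BBBB CCCC"); in both, the BBBB field returned by
            // VRegB_35c()/VRegB_3rc() is the method index resolved below.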
   2418 
   2419     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
   2420         ->GetDexFile();
   2421     uint32_t shorty_len;
   2422     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
   2423                                                    &shorty_len);
   2424     {
   2425       // Remember the args in case a GC happens in FindMethodFromCode.
   2426       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   2427       RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
   2428       visitor.VisitArguments();
   2429       method = FindMethodFromCode<kInterface, false>(dex_method_idx,
   2430                                                      &this_object,
   2431                                                      caller_method,
   2432                                                      self);
   2433       visitor.FixupReferences();
   2434     }
   2435 
   2436     if (UNLIKELY(method == nullptr)) {
   2437       CHECK(self->IsExceptionPending());
   2438       return GetTwoWordFailureValue();  // Failure.
   2439     }
   2440     interface_method =
   2441         caller_method->GetDexCacheResolvedMethod(dex_method_idx, kRuntimePointerSize);
   2442     DCHECK(!interface_method->IsRuntimeMethod());
   2443   }
   2444 
   2445   // We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
   2446   // We create a new table with the new pair { interface_method, method }.
   2447   uint32_t imt_index = ImTable::GetImtIndex(interface_method);
   2448   ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
   2449   if (conflict_method->IsRuntimeMethod()) {
   2450     ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
   2451         cls.Get(),
   2452         conflict_method,
   2453         interface_method,
   2454         method,
   2455         /*force_new_conflict_method*/false);
   2456     if (new_conflict_method != conflict_method) {
   2457       // Update the IMT if we created a new conflict method. No fence is needed here, as the
   2458       // data is consistent.
   2459       imt->Set(imt_index,
   2460                new_conflict_method,
   2461                kRuntimePointerSize);
   2462     }
   2463   }
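          // AddMethodToConflictTable (a sketch of the intent, not the exact implementation) builds a
          // new table one entry larger, holding the existing pairs plus { interface_method, method },
          // typically allocated in the class loader's LinearAlloc; later dispatches through this IMT
          // slot can then hit the pair directly without re-entering this trampoline.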
   2464 
   2465   const void* code = method->GetEntryPointFromQuickCompiledCode();
   2466 
   2467   // When we return, the caller will branch to this address, so it had better not be 0!
   2468   DCHECK(code != nullptr) << "Code was null in method: " << method->PrettyMethod()
   2469                           << " location: " << method->GetDexFile()->GetLocation();
   2470 
   2471   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   2472                                 reinterpret_cast<uintptr_t>(method));
   2473 }
   2474 
   2475 // Returns the shorty's return-type character so the caller can determine how to move
   2476 // |result| into the expected registers. The return type is statically known at the
   2477 // call site, so the compiler could in principle call different flavors of this code
   2478 // path depending on the shorty type, though this would require a different entry
   2479 // point for each type.
   2480 extern "C" uintptr_t artInvokePolymorphic(
   2481     JValue* result,
   2482     mirror::Object* raw_method_handle,
   2483     Thread* self,
   2484     ArtMethod** sp)
   2485     REQUIRES_SHARED(Locks::mutator_lock_) {
   2486   ScopedQuickEntrypointChecks sqec(self);
   2487   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
   2488 
   2489   // Start new JNI local reference state
   2490   JNIEnvExt* env = self->GetJniEnv();
   2491   ScopedObjectAccessUnchecked soa(env);
   2492   ScopedJniEnvLocalRefState env_state(env);
   2493   const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
   2494 
   2495   // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
   2496   ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
   2497   uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
   2498   const DexFile::CodeItem* code = caller_method->GetCodeItem();
   2499   const Instruction* inst = Instruction::At(&code->insns_[dex_pc]);
   2500   DCHECK(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC ||
   2501          inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
   2502   const DexFile* dex_file = caller_method->GetDexFile();
   2503   const uint32_t proto_idx = inst->VRegH();
   2504   const char* shorty = dex_file->GetShorty(proto_idx);
   2505   const size_t shorty_length = strlen(shorty);
   2506   static const bool kMethodIsStatic = false;  // invoke() and invokeExact() are not static.
   2507   RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
   2508   gc_visitor.VisitArguments();
   2509 
   2510   // Wrap raw_method_handle in a Handle for safety.
   2511   StackHandleScope<5> hs(self);
   2512   Handle<mirror::MethodHandle> method_handle(
   2513       hs.NewHandle(ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(raw_method_handle))));
   2514   raw_method_handle = nullptr;
   2515   self->EndAssertNoThreadSuspension(old_cause);
   2516 
   2517   // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
   2518   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   2519   ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self,
   2520                                                                                    inst->VRegB(),
   2521                                                                                    caller_method,
   2522                                                                                    kVirtual);
   2523   DCHECK((resolved_method ==
   2524           jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
   2525          (resolved_method ==
   2526           jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke)));
   2527   if (UNLIKELY(method_handle.IsNull())) {
   2528     ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
   2529     return static_cast<uintptr_t>('V');
   2530   }
   2531 
   2532   Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass()));
   2533   Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType(
   2534       *dex_file, proto_idx,
   2535       hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
   2536       hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
   2537   // This implies we couldn't resolve one or more types in this method handle.
   2538   if (UNLIKELY(method_type.IsNull())) {
   2539     CHECK(self->IsExceptionPending());
   2540     return static_cast<uintptr_t>('V');
   2541   }
   2542 
   2543   DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, static_cast<uint32_t>(inst->VRegA()));
   2544   DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
   2545 
   2546   // Fix references before constructing the shadow frame.
   2547   gc_visitor.FixupReferences();
   2548 
   2549   // Construct shadow frame placing arguments consecutively from |first_arg|.
   2550   const bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
   2551   const size_t num_vregs = is_range ? inst->VRegA_4rcc() : inst->VRegA_45cc();
   2552   const size_t first_arg = 0;
   2553   ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
   2554       CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
   2555   ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
   2556   ScopedStackedShadowFramePusher
   2557       frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
   2558   BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
   2559                                                     kMethodIsStatic,
   2560                                                     shorty,
   2561                                                     strlen(shorty),
   2562                                                     shadow_frame,
   2563                                                     first_arg);
   2564   shadow_frame_builder.VisitArguments();
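          // Resulting layout (illustrative, for the non-static invoke handled here): the shadow
          // frame's vregs now hold the arguments consecutively, receiver first:
          //
          //   vreg[first_arg + 0]      = method-handle receiver
          //   vreg[first_arg + 1 ...]  = remaining call-site arguments
          //
          // which is why |first_callee_arg| below skips one slot past the receiver.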
   2565 
   2566   // Push a transition back into managed code onto the linked list in thread.
   2567   ManagedStack fragment;
   2568   self->PushManagedStackFragment(&fragment);
   2569 
   2570   // Call DoInvokePolymorphic with |is_range| = true, as the shadow frame has its argument
   2571   // registers in consecutive order.
   2572   uint32_t unused_args[Instruction::kMaxVarArgRegs] = {};
   2573   uint32_t first_callee_arg = first_arg + 1;
   2574   if (!DoInvokePolymorphic<true /* is_range */>(self,
   2575                                                 resolved_method,
   2576                                                 *shadow_frame,
   2577                                                 method_handle,
   2578                                                 method_type,
   2579                                                 unused_args,
   2580                                                 first_callee_arg,
   2581                                                 result)) {
   2582     DCHECK(self->IsExceptionPending());
   2583   }
   2584 
   2585   // Pop transition record.
   2586   self->PopManagedStackFragment(fragment);
   2587 
   2588   return static_cast<uintptr_t>(shorty[0]);
   2589 }
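        // Sketch of how the returned shorty character could be consumed by the (assembly) caller to
        // move |result| into the right return register; illustrative only, the real handling lives
        // in the architecture-specific stub that calls this entrypoint:
        //
        //   switch (static_cast<char>(ret)) {
        //     case 'V': break;                         // void: nothing to move.
        //     case 'F': case 'D': /* copy |result| to the floating-point return register */ break;
        //     case 'J': /* copy the 64-bit result to the core return register (or pair) */ break;
        //     default:  /* 32-bit core return register (objects, ints, etc.) */ break;
        //   }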
   2590 
   2591 }  // namespace art
   2592