      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "callee_save_frame.h"
     18 #include "common_throws.h"
     19 #include "dex_file-inl.h"
     20 #include "dex_instruction-inl.h"
     21 #include "entrypoints/entrypoint_utils-inl.h"
     22 #include "gc/accounting/card_table-inl.h"
     23 #include "instruction_set.h"
     24 #include "interpreter/interpreter.h"
     25 #include "mirror/art_method-inl.h"
     26 #include "mirror/class-inl.h"
     27 #include "mirror/dex_cache-inl.h"
     28 #include "mirror/object-inl.h"
     29 #include "mirror/object_array-inl.h"
     30 #include "runtime.h"
     31 #include "scoped_thread_state_change.h"
     32 
     33 namespace art {
     34 
      35 // Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
     36 class QuickArgumentVisitor {
     37   // Number of bytes for each out register in the caller method's frame.
     38   static constexpr size_t kBytesStackArgLocation = 4;
     39   // Frame size in bytes of a callee-save frame for RefsAndArgs.
     40   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
     41       GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
     42 #if defined(__arm__)
     43   // The callee save frame is pointed to by SP.
     44   // | argN       |  |
     45   // | ...        |  |
     46   // | arg4       |  |
     47   // | arg3 spill |  |  Caller's frame
     48   // | arg2 spill |  |
     49   // | arg1 spill |  |
     50   // | Method*    | ---
     51   // | LR         |
     52   // | ...        |    callee saves
     53   // | R3         |    arg3
     54   // | R2         |    arg2
     55   // | R1         |    arg1
     56   // | R0         |    padding
     57   // | Method*    |  <- sp
     58   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
     59   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
     60   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
     61   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
     62       arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
     63   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
     64       arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
     65   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
     66       arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
     67   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     68     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
     69   }
     70 #elif defined(__aarch64__)
     71   // The callee save frame is pointed to by SP.
     72   // | argN       |  |
     73   // | ...        |  |
     74   // | arg4       |  |
     75   // | arg3 spill |  |  Caller's frame
     76   // | arg2 spill |  |
     77   // | arg1 spill |  |
     78   // | Method*    | ---
     79   // | LR         |
     80   // | X29        |
     81   // |  :         |
     82   // | X20        |
     83   // | X7         |
     84   // | :          |
     85   // | X1         |
     86   // | D7         |
     87   // |  :         |
     88   // | D0         |
     89   // |            |    padding
     90   // | Method*    |  <- sp
     91   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
     92   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
     93   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
     94   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
     95       arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
     96   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
     97       arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
     98   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
     99       arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
    100   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    101     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    102   }
    103 #elif defined(__mips__)
    104   // The callee save frame is pointed to by SP.
    105   // | argN       |  |
    106   // | ...        |  |
    107   // | arg4       |  |
    108   // | arg3 spill |  |  Caller's frame
    109   // | arg2 spill |  |
    110   // | arg1 spill |  |
    111   // | Method*    | ---
    112   // | RA         |
    113   // | ...        |    callee saves
    114   // | A3         |    arg3
    115   // | A2         |    arg2
    116   // | A1         |    arg1
    117   // | A0/Method* |  <- sp
    118   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
    119   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
    120   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
    121   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
    122   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
    123   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
    124   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    125     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    126   }
    127 #elif defined(__i386__)
    128   // The callee save frame is pointed to by SP.
    129   // | argN        |  |
    130   // | ...         |  |
    131   // | arg4        |  |
    132   // | arg3 spill  |  |  Caller's frame
    133   // | arg2 spill  |  |
    134   // | arg1 spill  |  |
    135   // | Method*     | ---
    136   // | Return      |
    137   // | EBP,ESI,EDI |    callee saves
    138   // | EBX         |    arg3
    139   // | EDX         |    arg2
    140   // | ECX         |    arg1
    141   // | EAX/Method* |  <- sp
    142   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
    143   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
    144   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
    145   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
    146   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
    147   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
    148   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    149     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    150   }
    151 #elif defined(__x86_64__)
    152   // The callee save frame is pointed to by SP.
    153   // | argN            |  |
    154   // | ...             |  |
    155   // | reg. arg spills |  |  Caller's frame
    156   // | Method*         | ---
    157   // | Return          |
    158   // | R15             |    callee save
    159   // | R14             |    callee save
    160   // | R13             |    callee save
    161   // | R12             |    callee save
    162   // | R9              |    arg5
    163   // | R8              |    arg4
    164   // | RSI/R6          |    arg1
    165   // | RBP/R5          |    callee save
    166   // | RBX/R3          |    callee save
    167   // | RDX/R2          |    arg2
    168   // | RCX/R1          |    arg3
    169   // | XMM7            |    float arg 8
    170   // | XMM6            |    float arg 7
    171   // | XMM5            |    float arg 6
    172   // | XMM4            |    float arg 5
    173   // | XMM3            |    float arg 4
    174   // | XMM2            |    float arg 3
    175   // | XMM1            |    float arg 2
    176   // | XMM0            |    float arg 1
    177   // | Padding         |
    178   // | RDI/Method*     |  <- sp
    179   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
    180   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
    181   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
    182   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
    183   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
    184   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
    185   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    186     switch (gpr_index) {
    187       case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
    188       case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
    189       case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
    190       case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
    191       case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
    192       default:
    193       LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
    194       return 0;
    195     }
    196   }
    197 #else
    198 #error "Unsupported architecture"
    199 #endif
    200 
    201  public:
    202   static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
    203       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    204     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    205     byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    206     return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
    207   }
    208 
     209   // For the given quick RefsAndArgs callee-save frame, return the caller's PC.
    210   static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
    211       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    212     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    213     byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    214     return *reinterpret_cast<uintptr_t*>(lr);
    215   }
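
  // Note: artQuickResolutionTrampoline (below) relies on these two helpers to recover the calling
  // method and its PC from a Runtime::kRefsAndArgs callee-save frame.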
    216 
    217   QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
    218                        uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
    219           is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
    220           gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
    221           fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
    222           stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
    223                       + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
    224           gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
    225           is_split_long_or_double_(false) {}
    226 
    227   virtual ~QuickArgumentVisitor() {}
    228 
    229   virtual void Visit() = 0;
    230 
    231   Primitive::Type GetParamPrimitiveType() const {
    232     return cur_type_;
    233   }
    234 
    235   byte* GetParamAddress() const {
    236     if (!kQuickSoftFloatAbi) {
    237       Primitive::Type type = GetParamPrimitiveType();
    238       if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
    239         if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    240           return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
    241         }
    242         return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    243       }
    244     }
    245     if (gpr_index_ < kNumQuickGprArgs) {
    246       return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    247     }
    248     return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    249   }
    250 
    251   bool IsSplitLongOrDouble() const {
    252     if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
    253       return is_split_long_or_double_;
    254     } else {
    255       return false;  // An optimization for when GPR and FPRs are 64bit.
    256     }
    257   }
    258 
    259   bool IsParamAReference() const {
    260     return GetParamPrimitiveType() == Primitive::kPrimNot;
    261   }
    262 
    263   bool IsParamALongOrDouble() const {
    264     Primitive::Type type = GetParamPrimitiveType();
    265     return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
    266   }
    267 
    268   uint64_t ReadSplitLongParam() const {
    269     DCHECK(IsSplitLongOrDouble());
    270     uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    271     uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    272     return (low_half & 0xffffffffULL) | (high_half << 32);
    273   }
    274 
    275   void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    276     // This implementation doesn't support a reg-spill area for hard-float
    277     // ABI targets such as x86_64 and aarch64. So, for targets where
    278     //     'kQuickSoftFloatAbi' is 'false':
    279     //     (a) 'stack_args_' must point to the method's first argument, and
    280     //     (b) 'stack_index_' must advance on every visit, whatever the
    281     //         argument type is.
    282     gpr_index_ = 0;
    283     fpr_index_ = 0;
    284     stack_index_ = 0;
    285     if (!is_static_) {  // Handle the implicit 'this' argument.
    286       cur_type_ = Primitive::kPrimNot;
    287       is_split_long_or_double_ = false;
    288       Visit();
    289       if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
    290         stack_index_++;
    291       }
    292       if (kNumQuickGprArgs > 0) {
    293         gpr_index_++;
    294       }
    295     }
    296     for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
    297       cur_type_ = Primitive::GetType(shorty_[shorty_index]);
    298       switch (cur_type_) {
    299         case Primitive::kPrimNot:
    300         case Primitive::kPrimBoolean:
    301         case Primitive::kPrimByte:
    302         case Primitive::kPrimChar:
    303         case Primitive::kPrimShort:
    304         case Primitive::kPrimInt:
    305           is_split_long_or_double_ = false;
    306           Visit();
    307           if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
    308             stack_index_++;
    309           }
    310           if (gpr_index_ < kNumQuickGprArgs) {
    311             gpr_index_++;
    312           }
    313           break;
    314         case Primitive::kPrimFloat:
    315           is_split_long_or_double_ = false;
    316           Visit();
    317           if (kQuickSoftFloatAbi) {
    318             if (gpr_index_ < kNumQuickGprArgs) {
    319               gpr_index_++;
    320             } else {
    321               stack_index_++;
    322             }
    323           } else {
    324             if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    325               fpr_index_++;
    326             }
    327             stack_index_++;
    328           }
    329           break;
    330         case Primitive::kPrimDouble:
    331         case Primitive::kPrimLong:
    332           if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
    333             is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
    334                 ((gpr_index_ + 1) == kNumQuickGprArgs);
    335             Visit();
    336             if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
    337               if (kBytesStackArgLocation == 4) {
    338                 stack_index_+= 2;
    339               } else {
    340                 CHECK_EQ(kBytesStackArgLocation, 8U);
    341                 stack_index_++;
    342               }
    343             }
    344             if (gpr_index_ < kNumQuickGprArgs) {
    345               gpr_index_++;
    346               if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
    347                 if (gpr_index_ < kNumQuickGprArgs) {
    348                   gpr_index_++;
    349                 } else if (kQuickSoftFloatAbi) {
    350                   stack_index_++;
    351                 }
    352               }
    353             }
    354           } else {
    355             is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
    356                 ((fpr_index_ + 1) == kNumQuickFprArgs);
    357             Visit();
    358             if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    359               fpr_index_++;
    360               if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
    361                 if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    362                   fpr_index_++;
    363                 }
    364               }
    365             }
    366             if (kBytesStackArgLocation == 4) {
    367               stack_index_+= 2;
    368             } else {
    369               CHECK_EQ(kBytesStackArgLocation, 8U);
    370               stack_index_++;
    371             }
    372           }
    373           break;
    374         default:
    375           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
    376       }
    377     }
    378   }
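
  // Example (32-bit ARM, soft-float, kNumQuickGprArgs == 3): for a static method with shorty
  // "VIIJ" (void return, two ints, one long), the two ints are visited in R1 and R2, and the
  // long is split: its low word sits in R3 while its high word is the first stack argument, so
  // the visitor must reassemble it via ReadSplitLongParam().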
    379 
    380  private:
    381   static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
    382                                              uint32_t shorty_len) {
    383     if (kQuickSoftFloatAbi) {
    384       CHECK_EQ(kNumQuickFprArgs, 0U);
    385       return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
    386           + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
    387     } else {
    388       // For now, there is no reg-spill area for targets with a hard-float
    389       // ABI, so return the offset of the method's first parameter
    390       // ('this' for non-static methods).
    391       return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
    392     }
    393   }
    394 
    395  protected:
    396   const bool is_static_;
    397   const char* const shorty_;
    398   const uint32_t shorty_len_;
    399 
    400  private:
    401   byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
    402   byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
    403   byte* const stack_args_;  // Address of stack arguments in caller's frame.
    404   uint32_t gpr_index_;  // Index into spilled GPRs.
    405   uint32_t fpr_index_;  // Index into spilled FPRs.
    406   uint32_t stack_index_;  // Index into arguments on the stack.
    407   // The current type of argument during VisitArguments.
    408   Primitive::Type cur_type_;
    409   // Does a 64bit parameter straddle the register and stack arguments?
    410   bool is_split_long_or_double_;
    411 };
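
// A minimal example subclass (hypothetical, for illustration only): it merely counts reference
// parameters, but shows the intended usage pattern - construct the visitor with the callee-save
// frame pointer and the method's shorty, then call VisitArguments(), which invokes Visit() once
// per parameter with GetParamPrimitiveType()/GetParamAddress() describing the current argument.
class CountReferenceArgumentsVisitor FINAL : public QuickArgumentVisitor {
 public:
  CountReferenceArgumentsVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
                                 const char* shorty, uint32_t shorty_len)
      : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), reference_count_(0) {}

  void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
    if (IsParamAReference()) {
      ++reference_count_;
    }
  }

  size_t GetReferenceCount() const {
    return reference_count_;
  }

 private:
  size_t reference_count_;

  DISALLOW_COPY_AND_ASSIGN(CountReferenceArgumentsVisitor);
};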
    412 
    413 // Visits arguments on the stack, placing them into the shadow frame.
    414 class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
    415  public:
    416   BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    417                                const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
    418                                size_t first_arg_reg) :
    419       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
    420 
    421   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    422 
    423  private:
    424   ShadowFrame* const sf_;
    425   uint32_t cur_reg_;
    426 
    427   DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
    428 };
    429 
    430 void BuildQuickShadowFrameVisitor::Visit() {
    431   Primitive::Type type = GetParamPrimitiveType();
    432   switch (type) {
    433     case Primitive::kPrimLong:  // Fall-through.
    434     case Primitive::kPrimDouble:
    435       if (IsSplitLongOrDouble()) {
    436         sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
    437       } else {
    438         sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
    439       }
    440       ++cur_reg_;
    441       break;
    442     case Primitive::kPrimNot: {
    443         StackReference<mirror::Object>* stack_ref =
    444             reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    445         sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
    446       }
    447       break;
    448     case Primitive::kPrimBoolean:  // Fall-through.
    449     case Primitive::kPrimByte:     // Fall-through.
    450     case Primitive::kPrimChar:     // Fall-through.
    451     case Primitive::kPrimShort:    // Fall-through.
    452     case Primitive::kPrimInt:      // Fall-through.
    453     case Primitive::kPrimFloat:
    454       sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
    455       break;
    456     case Primitive::kPrimVoid:
    457       LOG(FATAL) << "UNREACHABLE";
    458       break;
    459   }
    460   ++cur_reg_;
    461 }
    462 
    463 extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    464                                                 StackReference<mirror::ArtMethod>* sp)
    465     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    466   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
    467   // frame.
    468   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    469 
    470   if (method->IsAbstract()) {
    471     ThrowAbstractMethodError(method);
    472     return 0;
    473   } else {
    474     DCHECK(!method->IsNative()) << PrettyMethod(method);
    475     const char* old_cause = self->StartAssertNoThreadSuspension(
    476         "Building interpreter shadow frame");
    477     const DexFile::CodeItem* code_item = method->GetCodeItem();
    478     DCHECK(code_item != nullptr) << PrettyMethod(method);
    479     uint16_t num_regs = code_item->registers_size_;
    480     void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    481     // No last shadow coming from quick.
    482     ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
    483     size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    484     uint32_t shorty_len = 0;
    485     const char* shorty = method->GetShorty(&shorty_len);
    486     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
    487                                                       shadow_frame, first_arg_reg);
    488     shadow_frame_builder.VisitArguments();
    489     // Push a transition back into managed code onto the linked list in thread.
    490     ManagedStack fragment;
    491     self->PushManagedStackFragment(&fragment);
    492     self->PushShadowFrame(shadow_frame);
    493     self->EndAssertNoThreadSuspension(old_cause);
    494 
    495     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
    496       // Ensure static method's class is initialized.
    497       StackHandleScope<1> hs(self);
    498       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
    499       if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
    500         DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
    501         self->PopManagedStackFragment(fragment);
    502         return 0;
    503       }
    504     }
    505 
    506     StackHandleScope<1> hs(self);
    507     MethodHelper mh(hs.NewHandle(method));
    508     JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    509     // Pop transition.
    510     self->PopManagedStackFragment(fragment);
    511     // No need to restore the args since the method has already been run by the interpreter.
    512     return result.GetJ();
    513   }
    514 }
    515 
    516 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
    517 // converted to jobjects.
    518 class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
    519  public:
    520   BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    521                             const char* shorty, uint32_t shorty_len,
    522                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
    523       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
    524 
    525   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    526 
    527   void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
    528 
    529  private:
    530   ScopedObjectAccessUnchecked* const soa_;
    531   std::vector<jvalue>* const args_;
    532   // References which we must update when exiting in case the GC moved the objects.
    533   std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
    534 
    535   DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
    536 };
    537 
    538 void BuildQuickArgumentVisitor::Visit() {
    539   jvalue val;
    540   Primitive::Type type = GetParamPrimitiveType();
    541   switch (type) {
    542     case Primitive::kPrimNot: {
    543       StackReference<mirror::Object>* stack_ref =
    544           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    545       val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    546       references_.push_back(std::make_pair(val.l, stack_ref));
    547       break;
    548     }
    549     case Primitive::kPrimLong:  // Fall-through.
    550     case Primitive::kPrimDouble:
    551       if (IsSplitLongOrDouble()) {
    552         val.j = ReadSplitLongParam();
    553       } else {
    554         val.j = *reinterpret_cast<jlong*>(GetParamAddress());
    555       }
    556       break;
    557     case Primitive::kPrimBoolean:  // Fall-through.
    558     case Primitive::kPrimByte:     // Fall-through.
    559     case Primitive::kPrimChar:     // Fall-through.
    560     case Primitive::kPrimShort:    // Fall-through.
    561     case Primitive::kPrimInt:      // Fall-through.
    562     case Primitive::kPrimFloat:
    563       val.i = *reinterpret_cast<jint*>(GetParamAddress());
    564       break;
    565     case Primitive::kPrimVoid:
    566       LOG(FATAL) << "UNREACHABLE";
    567       val.j = 0;
    568       break;
    569   }
    570   args_->push_back(val);
    571 }
    572 
    573 void BuildQuickArgumentVisitor::FixupReferences() {
    574   // Fixup any references which may have changed.
    575   for (const auto& pair : references_) {
    576     pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    577     soa_->Env()->DeleteLocalRef(pair.first);
    578   }
    579 }
    580 
    581 // Handler for invocation on proxy methods. On entry, a frame will exist for the proxy object method,
    582 // which is responsible for recording callee save registers. We explicitly place the incoming reference
    583 // arguments into jobjects (so they survive GC), then invoke the invocation handler, a field within the
    584 // proxy object, which boxes the primitive arguments and deals with error cases.
    585 extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
    586                                                mirror::Object* receiver,
    587                                                Thread* self, StackReference<mirror::ArtMethod>* sp)
    588     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    589   DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
    590   DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
    591   // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
    592   const char* old_cause =
    593       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
    594   // Register the top of the managed stack, making the stack crawlable.
    595   DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
    596   self->SetTopOfStack(sp, 0);
    597   DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
    598             Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
    599       << PrettyMethod(proxy_method);
    600   self->VerifyStack();
    601   // Start new JNI local reference state.
    602   JNIEnvExt* env = self->GetJniEnv();
    603   ScopedObjectAccessUnchecked soa(env);
    604   ScopedJniEnvLocalRefState env_state(env);
    605   // Create a local ref. copy of the receiver.
    606   jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
    607 
    608   // Place the arguments into the args vector and remove the receiver.
    609   mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
    610   CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
    611                                        << PrettyMethod(non_proxy_method);
    612   std::vector<jvalue> args;
    613   uint32_t shorty_len = 0;
    614   const char* shorty = proxy_method->GetShorty(&shorty_len);
    615   BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
    616 
    617   local_ref_visitor.VisitArguments();
    618   DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
    619   args.erase(args.begin());
    620 
    621   // Convert proxy method into expected interface method.
    622   mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
    623   DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
    624   DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
    625   jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
    626 
    627   // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
    628   // that performs allocations.
    629   self->EndAssertNoThreadSuspension(old_cause);
    630   JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
    631   // Restore references which might have moved.
    632   local_ref_visitor.FixupReferences();
    633   return result.GetJ();
    634 }
    635 
    636 // Read object references held in arguments from quick frames and place in a JNI local references,
    637 // so they don't get garbage collected.
    638 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
    639  public:
    640   RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    641                                const char* shorty, uint32_t shorty_len,
    642                                ScopedObjectAccessUnchecked* soa) :
    643       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
    644 
    645   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    646 
    647   void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
    648 
    649  private:
    650   ScopedObjectAccessUnchecked* const soa_;
    651   // References which we must update when exiting in case the GC moved the objects.
    652   std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
    653 
    654   DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
    655 };
    656 
    657 void RememberForGcArgumentVisitor::Visit() {
    658   if (IsParamAReference()) {
    659     StackReference<mirror::Object>* stack_ref =
    660         reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    661     jobject reference =
    662         soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    663     references_.push_back(std::make_pair(reference, stack_ref));
    664   }
    665 }
    666 
    667 void RememberForGcArgumentVisitor::FixupReferences() {
    668   // Fixup any references which may have changed.
    669   for (const auto& pair : references_) {
    670     pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    671     soa_->Env()->DeleteLocalRef(pair.first);
    672   }
    673 }
    674 
    675 // Lazily resolve a method for quick. Called by stub code.
    676 extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
    677                                                     mirror::Object* receiver,
    678                                                     Thread* self,
    679                                                     StackReference<mirror::ArtMethod>* sp)
    680     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    681   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    682   // Start new JNI local reference state
    683   JNIEnvExt* env = self->GetJniEnv();
    684   ScopedObjectAccessUnchecked soa(env);
    685   ScopedJniEnvLocalRefState env_state(env);
    686   const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
    687 
    688   // Compute details about the called method (avoid GCs)
    689   ClassLinker* linker = Runtime::Current()->GetClassLinker();
    690   mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
    691   InvokeType invoke_type;
    692   const DexFile* dex_file;
    693   uint32_t dex_method_idx;
    694   if (called->IsRuntimeMethod()) {
    695     uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    696     const DexFile::CodeItem* code;
    697     dex_file = caller->GetDexFile();
    698     code = caller->GetCodeItem();
    699     CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    700     const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    701     Instruction::Code instr_code = instr->Opcode();
    702     bool is_range;
    703     switch (instr_code) {
    704       case Instruction::INVOKE_DIRECT:
    705         invoke_type = kDirect;
    706         is_range = false;
    707         break;
    708       case Instruction::INVOKE_DIRECT_RANGE:
    709         invoke_type = kDirect;
    710         is_range = true;
    711         break;
    712       case Instruction::INVOKE_STATIC:
    713         invoke_type = kStatic;
    714         is_range = false;
    715         break;
    716       case Instruction::INVOKE_STATIC_RANGE:
    717         invoke_type = kStatic;
    718         is_range = true;
    719         break;
    720       case Instruction::INVOKE_SUPER:
    721         invoke_type = kSuper;
    722         is_range = false;
    723         break;
    724       case Instruction::INVOKE_SUPER_RANGE:
    725         invoke_type = kSuper;
    726         is_range = true;
    727         break;
    728       case Instruction::INVOKE_VIRTUAL:
    729         invoke_type = kVirtual;
    730         is_range = false;
    731         break;
    732       case Instruction::INVOKE_VIRTUAL_RANGE:
    733         invoke_type = kVirtual;
    734         is_range = true;
    735         break;
    736       case Instruction::INVOKE_INTERFACE:
    737         invoke_type = kInterface;
    738         is_range = false;
    739         break;
    740       case Instruction::INVOKE_INTERFACE_RANGE:
    741         invoke_type = kInterface;
    742         is_range = true;
    743         break;
    744       default:
    745         LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
    746         // Avoid used uninitialized warnings.
    747         invoke_type = kDirect;
    748         is_range = false;
    749     }
    750     dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
    751   } else {
    752     invoke_type = kStatic;
    753     dex_file = called->GetDexFile();
    754     dex_method_idx = called->GetDexMethodIndex();
    755   }
    756   uint32_t shorty_len;
    757   const char* shorty =
    758       dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
    759   RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
    760   visitor.VisitArguments();
    761   self->EndAssertNoThreadSuspension(old_cause);
    762   bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
    763   // Resolve method filling in dex cache.
    764   if (UNLIKELY(called->IsRuntimeMethod())) {
    765     StackHandleScope<1> hs(self);
    766     mirror::Object* dummy = nullptr;
    767     HandleWrapper<mirror::Object> h_receiver(
    768         hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    769     called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
    770   }
    771   const void* code = NULL;
    772   if (LIKELY(!self->IsExceptionPending())) {
    773     // Incompatible class change should have been handled in resolve method.
    774     CHECK(!called->CheckIncompatibleClassChange(invoke_type))
    775         << PrettyMethod(called) << " " << invoke_type;
    776     if (virtual_or_interface) {
    777       // Refine called method based on receiver.
    778       CHECK(receiver != nullptr) << invoke_type;
    779 
    780       mirror::ArtMethod* orig_called = called;
    781       if (invoke_type == kVirtual) {
    782         called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
    783       } else {
    784         called = receiver->GetClass()->FindVirtualMethodForInterface(called);
    785       }
    786 
    787       CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
    788                                << PrettyTypeOf(receiver) << " "
    789                                << invoke_type << " " << orig_called->GetVtableIndex();
    790 
    791       // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
    792       // of the sharpened method.
    793       if (called->HasSameDexCacheResolvedMethods(caller)) {
    794         caller->SetDexCacheResolvedMethod(called->GetDexMethodIndex(), called);
    795       } else {
    796         // Calling from one dex file to another, need to compute the method index appropriate to
    797         // the caller's dex file. Since we get here only if the original called was a runtime
    798         // method, we've got the correct dex_file and a dex_method_idx from above.
    799         DCHECK_EQ(caller->GetDexFile(), dex_file);
    800         StackHandleScope<1> hs(self);
    801         MethodHelper mh(hs.NewHandle(called));
    802         uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
    803         if (method_index != DexFile::kDexNoIndex) {
    804           caller->SetDexCacheResolvedMethod(method_index, called);
    805         }
    806       }
    807     }
    808     // Ensure that the called method's class is initialized.
    809     StackHandleScope<1> hs(soa.Self());
    810     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    811     linker->EnsureInitialized(called_class, true, true);
    812     if (LIKELY(called_class->IsInitialized())) {
    813       code = called->GetEntryPointFromQuickCompiledCode();
    814     } else if (called_class->IsInitializing()) {
    815       if (invoke_type == kStatic) {
    816         // Class is still initializing, go to oat and grab code (trampoline must be left in place
    817         // until class is initialized to stop races between threads).
    818         code = linker->GetQuickOatCodeFor(called);
    819       } else {
    820         // No trampoline for non-static methods.
    821         code = called->GetEntryPointFromQuickCompiledCode();
    822       }
    823     } else {
    824       DCHECK(called_class->IsErroneous());
    825     }
    826   }
    827   CHECK_EQ(code == NULL, self->IsExceptionPending());
    828   // Fix up any locally saved objects that may have moved during a GC.
    829   visitor.FixupReferences();
    830   // Place the called method in the callee-save frame so that it becomes the first argument to the quick method.
    831   sp->Assign(called);
    832   return code;
    833 }
    834 
    835 /*
    836  * This class uses a couple of observations to unite the different calling conventions through
    837  * a few constants.
    838  *
    839  * 1) Number of registers used for passing is normally even, so counting down has no penalty for
    840  *    possible alignment.
    841  * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
    842  *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
     843  *    when we have to split things.
    844  * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
    845  *    and we can use Int handling directly.
    846  * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
    847  *    necessary when widening. Also, widening of Ints will take place implicitly, and the
    848  *    extension should be compatible with Aarch64, which mandates copying the available bits
    849  *    into LSB and leaving the rest unspecified.
    850  * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
    851  *    the stack.
    852  * 6) There is only little endian.
    853  *
    854  *
    855  * Actual work is supposed to be done in a delegate of the template type. The interface is as
    856  * follows:
    857  *
    858  * void PushGpr(uintptr_t):   Add a value for the next GPR
    859  *
     860  * void PushFpr4(float):      Add a value for the next FPR of size 32b. Only called if we need
     861  *                            padding, that is, when the architecture is 32b and aligns 64b values.
    862  *
    863  * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b, it's the callee's job to
    864  *                            split this if necessary. The current state will have aligned, if
    865  *                            necessary.
    866  *
    867  * void PushStack(uintptr_t): Push a value to the stack.
    868  *
     869  * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
     870  *                                          called with nullptr, as this might be important for
     871  *                                          null initialization. Must return the jobject, that is,
     872  *                                          the reference to the entry in the HandleScope (nullptr
     873  *                                          if necessary). A minimal example delegate is sketched after the class below.
    874  */
    875 template<class T> class BuildNativeCallFrameStateMachine {
    876  public:
    877 #if defined(__arm__)
    878   // TODO: These are all dummy values!
    879   static constexpr bool kNativeSoftFloatAbi = true;
    880   static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
    881   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
    882 
    883   static constexpr size_t kRegistersNeededForLong = 2;
    884   static constexpr size_t kRegistersNeededForDouble = 2;
    885   static constexpr bool kMultiRegistersAligned = true;
    886   static constexpr bool kMultiRegistersWidened = false;
    887   static constexpr bool kAlignLongOnStack = true;
    888   static constexpr bool kAlignDoubleOnStack = true;
    889 #elif defined(__aarch64__)
    890   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
     891   static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
    892   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
    893 
    894   static constexpr size_t kRegistersNeededForLong = 1;
    895   static constexpr size_t kRegistersNeededForDouble = 1;
    896   static constexpr bool kMultiRegistersAligned = false;
    897   static constexpr bool kMultiRegistersWidened = false;
    898   static constexpr bool kAlignLongOnStack = false;
    899   static constexpr bool kAlignDoubleOnStack = false;
    900 #elif defined(__mips__)
    901   // TODO: These are all dummy values!
     902   static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
     903   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs (dummy value).
     904   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs (dummy value).
    905 
    906   static constexpr size_t kRegistersNeededForLong = 2;
    907   static constexpr size_t kRegistersNeededForDouble = 2;
    908   static constexpr bool kMultiRegistersAligned = true;
    909   static constexpr bool kMultiRegistersWidened = true;
    910   static constexpr bool kAlignLongOnStack = false;
    911   static constexpr bool kAlignDoubleOnStack = false;
    912 #elif defined(__i386__)
    913   // TODO: Check these!
    914   static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
     915   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
     916   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
    917 
    918   static constexpr size_t kRegistersNeededForLong = 2;
    919   static constexpr size_t kRegistersNeededForDouble = 2;
    920   static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways
    921   static constexpr bool kMultiRegistersWidened = false;
    922   static constexpr bool kAlignLongOnStack = false;
    923   static constexpr bool kAlignDoubleOnStack = false;
    924 #elif defined(__x86_64__)
    925   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
    926   static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
    927   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
    928 
    929   static constexpr size_t kRegistersNeededForLong = 1;
    930   static constexpr size_t kRegistersNeededForDouble = 1;
    931   static constexpr bool kMultiRegistersAligned = false;
    932   static constexpr bool kMultiRegistersWidened = false;
    933   static constexpr bool kAlignLongOnStack = false;
    934   static constexpr bool kAlignDoubleOnStack = false;
    935 #else
    936 #error "Unsupported architecture"
    937 #endif
    938 
    939  public:
    940   explicit BuildNativeCallFrameStateMachine(T* delegate)
    941       : gpr_index_(kNumNativeGprArgs),
    942         fpr_index_(kNumNativeFprArgs),
    943         stack_entries_(0),
    944         delegate_(delegate) {
    945     // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    946     // the next register is even; counting down is just to make the compiler happy...
    947     CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    948     CHECK_EQ(kNumNativeFprArgs % 2, 0U);
    949   }
    950 
    951   virtual ~BuildNativeCallFrameStateMachine() {}
    952 
    953   bool HavePointerGpr() {
    954     return gpr_index_ > 0;
    955   }
    956 
    957   void AdvancePointer(const void* val) {
    958     if (HavePointerGpr()) {
    959       gpr_index_--;
    960       PushGpr(reinterpret_cast<uintptr_t>(val));
    961     } else {
    962       stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
    963       PushStack(reinterpret_cast<uintptr_t>(val));
    964       gpr_index_ = 0;
    965     }
    966   }
    967 
    968   bool HaveHandleScopeGpr() {
    969     return gpr_index_ > 0;
    970   }
    971 
    972   void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    973     uintptr_t handle = PushHandle(ptr);
    974     if (HaveHandleScopeGpr()) {
    975       gpr_index_--;
    976       PushGpr(handle);
    977     } else {
    978       stack_entries_++;
    979       PushStack(handle);
    980       gpr_index_ = 0;
    981     }
    982   }
    983 
    984   bool HaveIntGpr() {
    985     return gpr_index_ > 0;
    986   }
    987 
    988   void AdvanceInt(uint32_t val) {
    989     if (HaveIntGpr()) {
    990       gpr_index_--;
    991       PushGpr(val);
    992     } else {
    993       stack_entries_++;
    994       PushStack(val);
    995       gpr_index_ = 0;
    996     }
    997   }
    998 
    999   bool HaveLongGpr() {
   1000     return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
   1001   }
   1002 
   1003   bool LongGprNeedsPadding() {
   1004     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1005         kAlignLongOnStack &&                  // and when it needs alignment
   1006         (gpr_index_ & 1) == 1;                // counter is odd, see constructor
   1007   }
   1008 
   1009   bool LongStackNeedsPadding() {
   1010     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1011         kAlignLongOnStack &&                  // and when it needs 8B alignment
   1012         (stack_entries_ & 1) == 1;            // counter is odd
   1013   }
   1014 
   1015   void AdvanceLong(uint64_t val) {
   1016     if (HaveLongGpr()) {
   1017       if (LongGprNeedsPadding()) {
   1018         PushGpr(0);
   1019         gpr_index_--;
   1020       }
   1021       if (kRegistersNeededForLong == 1) {
   1022         PushGpr(static_cast<uintptr_t>(val));
   1023       } else {
   1024         PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1025         PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1026       }
   1027       gpr_index_ -= kRegistersNeededForLong;
   1028     } else {
   1029       if (LongStackNeedsPadding()) {
   1030         PushStack(0);
   1031         stack_entries_++;
   1032       }
   1033       if (kRegistersNeededForLong == 1) {
   1034         PushStack(static_cast<uintptr_t>(val));
   1035         stack_entries_++;
   1036       } else {
   1037         PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1038         PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1039         stack_entries_ += 2;
   1040       }
   1041       gpr_index_ = 0;
   1042     }
   1043   }
   1044 
   1045   bool HaveFloatFpr() {
   1046     return fpr_index_ > 0;
   1047   }
   1048 
   1049   void AdvanceFloat(float val) {
   1050     if (kNativeSoftFloatAbi) {
   1051       AdvanceInt(bit_cast<float, uint32_t>(val));
   1052     } else {
   1053       if (HaveFloatFpr()) {
   1054         fpr_index_--;
   1055         if (kRegistersNeededForDouble == 1) {
   1056           if (kMultiRegistersWidened) {
   1057             PushFpr8(bit_cast<double, uint64_t>(val));
   1058           } else {
   1059             // No widening, just use the bits.
   1060             PushFpr8(bit_cast<float, uint64_t>(val));
   1061           }
   1062         } else {
   1063           PushFpr4(val);
   1064         }
   1065       } else {
   1066         stack_entries_++;
   1067         if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
   1068           // Need to widen before storing: Note the "double" in the template instantiation.
   1069           // Note: We need to jump through those hoops to make the compiler happy.
   1070           DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
   1071           PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
   1072         } else {
   1073           PushStack(bit_cast<float, uintptr_t>(val));
   1074         }
   1075         fpr_index_ = 0;
   1076       }
   1077     }
   1078   }
   1079 
   1080   bool HaveDoubleFpr() {
   1081     return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
   1082   }
   1083 
   1084   bool DoubleFprNeedsPadding() {
   1085     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1086         kAlignDoubleOnStack &&                  // and when it needs alignment
   1087         (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
   1088   }
   1089 
   1090   bool DoubleStackNeedsPadding() {
   1091     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1092         kAlignDoubleOnStack &&                  // and when it needs 8B alignment
   1093         (stack_entries_ & 1) == 1;              // counter is odd
   1094   }
   1095 
   1096   void AdvanceDouble(uint64_t val) {
   1097     if (kNativeSoftFloatAbi) {
   1098       AdvanceLong(val);
   1099     } else {
   1100       if (HaveDoubleFpr()) {
   1101         if (DoubleFprNeedsPadding()) {
   1102           PushFpr4(0);
   1103           fpr_index_--;
   1104         }
   1105         PushFpr8(val);
   1106         fpr_index_ -= kRegistersNeededForDouble;
   1107       } else {
   1108         if (DoubleStackNeedsPadding()) {
   1109           PushStack(0);
   1110           stack_entries_++;
   1111         }
   1112         if (kRegistersNeededForDouble == 1) {
   1113           PushStack(static_cast<uintptr_t>(val));
   1114           stack_entries_++;
   1115         } else {
   1116           PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1117           PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1118           stack_entries_ += 2;
   1119         }
   1120         fpr_index_ = 0;
   1121       }
   1122     }
   1123   }
   1124 
   1125   uint32_t getStackEntries() {
   1126     return stack_entries_;
   1127   }
   1128 
   1129   uint32_t getNumberOfUsedGprs() {
   1130     return kNumNativeGprArgs - gpr_index_;
   1131   }
   1132 
   1133   uint32_t getNumberOfUsedFprs() {
   1134     return kNumNativeFprArgs - fpr_index_;
   1135   }
   1136 
   1137  private:
   1138   void PushGpr(uintptr_t val) {
   1139     delegate_->PushGpr(val);
   1140   }
   1141   void PushFpr4(float val) {
   1142     delegate_->PushFpr4(val);
   1143   }
   1144   void PushFpr8(uint64_t val) {
   1145     delegate_->PushFpr8(val);
   1146   }
   1147   void PushStack(uintptr_t val) {
   1148     delegate_->PushStack(val);
   1149   }
   1150   uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1151     return delegate_->PushHandle(ref);
   1152   }
   1153 
   1154   uint32_t gpr_index_;      // Number of free GPRs
   1155   uint32_t fpr_index_;      // Number of free FPRs
   1156   uint32_t stack_entries_;  // Stack entries are in multiples of 32b, as floats are usually not
   1157                             // extended
   1158   T* delegate_;             // What Push implementation gets called
   1159 };
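
// A minimal example delegate (hypothetical, for illustration only) satisfying the interface
// described in the comment above the class: each Push* hook simply counts what would be written.
// ComputeNativeCallFrameSize below is the real in-tree delegate used to size the native frame.
class CountingNativeCallFrameDelegate {
 public:
  CountingNativeCallFrameDelegate() : gprs_(0), fprs_(0), stack_slots_(0), handles_(0) {}

  void PushGpr(uintptr_t /* val */) { gprs_++; }
  void PushFpr4(float /* val */) { fprs_++; }
  void PushFpr8(uint64_t /* val */) { fprs_++; }
  void PushStack(uintptr_t /* val */) { stack_slots_++; }
  uintptr_t PushHandle(mirror::Object* /* ref */) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    handles_++;
    return 0;  // A real delegate returns the reference to the HandleScope entry.
  }

  // Usage: BuildNativeCallFrameStateMachine<CountingNativeCallFrameDelegate> sm(&delegate);
  //        then drive it with sm.AdvanceInt(...), sm.AdvanceDouble(...), etc. and read the counters.
  uint32_t gprs_, fprs_, stack_slots_, handles_;
};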
   1160 
   1161 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
   1162 // in subclasses.
   1163 //
   1164 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
   1165 // them with handles.
   1166 class ComputeNativeCallFrameSize {
   1167  public:
   1168   ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
   1169 
   1170   virtual ~ComputeNativeCallFrameSize() {}
   1171 
   1172   uint32_t GetStackSize() {
   1173     return num_stack_entries_ * sizeof(uintptr_t);
   1174   }
   1175 
   1176   uint8_t* LayoutCallStack(uint8_t* sp8) {
   1177     sp8 -= GetStackSize();
   1178     // Align by kStackAlignment.
   1179     sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1180     return sp8;
   1181   }
   1182 
   1183   uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) {
    1184     // Assumption (one pointer-sized spill slot per FPR argument) is OK right now, as we have soft-float arm.
   1185     size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
   1186     sp8 -= fregs * sizeof(uintptr_t);
   1187     *start_fpr = reinterpret_cast<uint32_t*>(sp8);
   1188     size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
   1189     sp8 -= iregs * sizeof(uintptr_t);
   1190     *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
   1191     return sp8;
   1192   }
   1193 
   1194   uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
   1195                             uint32_t** start_fpr) {
   1196     // Native call stack.
   1197     sp8 = LayoutCallStack(sp8);
   1198     *start_stack = reinterpret_cast<uintptr_t*>(sp8);
   1199 
   1200     // Put fprs and gprs below.
   1201     sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
   1202 
   1203     // Return the new bottom.
   1204     return sp8;
   1205   }
   1206 
   1207   virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
   1208       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
   1209 
   1210   void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1211     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
   1212 
   1213     WalkHeader(&sm);
   1214 
   1215     for (uint32_t i = 1; i < shorty_len; ++i) {
   1216       Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
   1217       switch (cur_type_) {
   1218         case Primitive::kPrimNot:
   1219           sm.AdvanceHandleScope(
   1220               reinterpret_cast<mirror::Object*>(0x12345678));
   1221           break;
   1222 
   1223         case Primitive::kPrimBoolean:
   1224         case Primitive::kPrimByte:
   1225         case Primitive::kPrimChar:
   1226         case Primitive::kPrimShort:
   1227         case Primitive::kPrimInt:
   1228           sm.AdvanceInt(0);
   1229           break;
   1230         case Primitive::kPrimFloat:
   1231           sm.AdvanceFloat(0);
   1232           break;
   1233         case Primitive::kPrimDouble:
   1234           sm.AdvanceDouble(0);
   1235           break;
   1236         case Primitive::kPrimLong:
   1237           sm.AdvanceLong(0);
   1238           break;
   1239         default:
   1240           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
   1241       }
   1242     }
   1243 
   1244     num_stack_entries_ = sm.getStackEntries();
   1245   }
   1246 
   1247   void PushGpr(uintptr_t /* val */) {
   1248     // not optimizing registers, yet
   1249   }
   1250 
   1251   void PushFpr4(float /* val */) {
   1252     // not optimizing registers, yet
   1253   }
   1254 
   1255   void PushFpr8(uint64_t /* val */) {
   1256     // not optimizing registers, yet
   1257   }
   1258 
   1259   void PushStack(uintptr_t /* val */) {
   1260     // counting is already done in the superclass
   1261   }
   1262 
   1263   virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
   1264     return reinterpret_cast<uintptr_t>(nullptr);
   1265   }
   1266 
   1267  protected:
   1268   uint32_t num_stack_entries_;
   1269 };
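         // Minimal usage sketch (illustrative only): for a native method with shorty "IJD" (returns int,
         // takes a long and a double), the size computation is driven roughly as follows; the concrete
         // numbers depend on the target ABI constants above.
         //
         //   ComputeNativeCallFrameSize fsc;
         //   fsc.Walk("IJD", 3);                     // shorty[0] is the return type and is skipped
         //   size_t out_bytes = fsc.GetStackSize();  // stack space needed beyond the register arrays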
   1270 
   1271 class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
   1272  public:
   1273   ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
   1274 
    1275   // Lays out the callee-save frame. Assumes that the frame corresponding to RefsAndArgs, which is
    1276   // not yet in its final layout, is at *m = sp. Updates *m to point to the bottom of the save frame.
    1277   //
    1278   // Note: assumes Walk() has been run beforehand, so the number of handle-scope references is known.
   1279   void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
   1280                              uint32_t* handle_scope_entries)
   1281       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1282     mirror::ArtMethod* method = (*m)->AsMirrorPtr();
   1283 
   1284     uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
   1285 
   1286     // First, fix up the layout of the callee-save frame.
   1287     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1288 
   1289     // "Free" the slot for the method.
   1290     sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.
   1291 
   1292     // Under the callee saves put handle scope and new method stack reference.
   1293     *handle_scope_entries = num_handle_scope_references_;
   1294 
   1295     size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
   1296     size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
   1297 
   1298     sp8 -= scope_and_method;
   1299     // Align by kStackAlignment.
   1300     sp8 = reinterpret_cast<uint8_t*>(RoundDown(
   1301         reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1302 
   1303     uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
   1304     *table = reinterpret_cast<HandleScope*>(sp8_table);
   1305     (*table)->SetNumberOfReferences(num_handle_scope_references_);
   1306 
   1307     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
   1308     uint8_t* method_pointer = sp8;
   1309     StackReference<mirror::ArtMethod>* new_method_ref =
   1310         reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
   1311     new_method_ref->Assign(method);
   1312     *m = new_method_ref;
   1313   }
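           // Resulting layout, roughly (sketch; higher addresses towards the top, padding from the
           // kStackAlignment round-down not shown):
           //   | callee-save frame (spilled registers, args)  |
           //   | <old Method* slot, now absorbed>             |
           //   | HandleScope (num_handle_scope_references_)   |  <- *table
           //   | StackReference<ArtMethod>                    |  <- *m, the new aligned bottom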
   1314 
   1315   // Adds space for the cookie. Note: may leave stack unaligned.
   1316   void LayoutCookie(uint8_t** sp) {
   1317     // Reference cookie and padding
   1318     *sp -= 8;
   1319   }
   1320 
   1321   // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
   1322   // Returns the new bottom. Note: this may be unaligned.
   1323   uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
   1324                               uint32_t* handle_scope_entries)
   1325       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1326     // First, fix up the layout of the callee-save frame.
   1327     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1328     LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);
   1329 
   1330     // The bottom of the callee-save frame is now where the method is, *m.
   1331     uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
   1332 
   1333     // Add space for cookie.
   1334     LayoutCookie(&sp8);
   1335 
   1336     return sp8;
   1337   }
   1338 
   1339   // WARNING: After this, *sp won't be pointing to the method anymore!
   1340   uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
   1341                          uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
   1342                          uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
   1343       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1344     Walk(shorty, shorty_len);
   1345 
   1346     // JNI part.
   1347     uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);
   1348 
   1349     sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
   1350 
   1351     // Return the new bottom.
   1352     return sp8;
   1353   }
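           // Overall region layout produced by ComputeLayout, roughly (sketch; higher addresses at the
           // top, alignment padding not shown):
           //   | re-laid-out callee-save frame, incl. HandleScope and method ref |  (see sketch above)
           //   | JNI reference cookie (+ padding)                                |
           //   | native call out-args (stack)                                    |  <- *start_stack
           //   | FPR array (kNumNativeFprArgs slots)                             |  <- *start_fpr
           //   | GPR array (kNumNativeGprArgs slots)                             |  <- *start_gpr, returned bottom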
   1354 
   1355   uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
   1356 
   1357   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
   1358   void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
   1359       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1360 
   1361  private:
   1362   uint32_t num_handle_scope_references_;
   1363 };
   1364 
   1365 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
   1366   num_handle_scope_references_++;
   1367   return reinterpret_cast<uintptr_t>(nullptr);
   1368 }
   1369 
   1370 void ComputeGenericJniFrameSize::WalkHeader(
   1371     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
   1372   // JNIEnv
   1373   sm->AdvancePointer(nullptr);
   1374 
   1375   // Class object or this as first argument
   1376   sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
   1377 }
   1378 
    1379 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
    1380 // the template requirements of BuildNativeCallFrameStateMachine.
   1381 class FillNativeCall {
   1382  public:
   1383   FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
   1384       cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
   1385 
   1386   virtual ~FillNativeCall() {}
   1387 
   1388   void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
   1389     cur_gpr_reg_ = gpr_regs;
   1390     cur_fpr_reg_ = fpr_regs;
   1391     cur_stack_arg_ = stack_args;
   1392   }
   1393 
   1394   void PushGpr(uintptr_t val) {
   1395     *cur_gpr_reg_ = val;
   1396     cur_gpr_reg_++;
   1397   }
   1398 
   1399   void PushFpr4(float val) {
   1400     *cur_fpr_reg_ = val;
   1401     cur_fpr_reg_++;
   1402   }
   1403 
   1404   void PushFpr8(uint64_t val) {
   1405     uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
   1406     *tmp = val;
   1407     cur_fpr_reg_ += 2;
   1408   }
   1409 
   1410   void PushStack(uintptr_t val) {
   1411     *cur_stack_arg_ = val;
   1412     cur_stack_arg_++;
   1413   }
   1414 
   1415   virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1416     LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
   1417     return 0U;
   1418   }
   1419 
   1420  private:
   1421   uintptr_t* cur_gpr_reg_;
   1422   uint32_t* cur_fpr_reg_;
   1423   uintptr_t* cur_stack_arg_;
   1424 };
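         // Minimal usage sketch (illustrative only; real buffers come from
         // ComputeNativeCallFrameSize::LayoutNativeCall, and the sizes are target-dependent):
         //
         //   uintptr_t gprs[8];
         //   uint32_t fprs[16];
         //   uintptr_t stack_args[16];
         //   FillNativeCall filler(gprs, fprs, stack_args);
         //   BuildNativeCallFrameStateMachine<FillNativeCall> sm(&filler);
         //   sm.AdvanceInt(42);       // written to gprs[0], or to a stack slot once GPRs are used up
         //   sm.AdvanceDouble(0);     // FPR pair or stack slots, depending on the ABI constants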
   1425 
    1426 // Visits arguments on the stack, placing them into a region lower down the stack for the benefit
    1427 // of transitioning into native code.
   1428 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
   1429  public:
   1430   BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
   1431                               const char* shorty, uint32_t shorty_len, Thread* self)
   1432      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
   1433        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
   1434     ComputeGenericJniFrameSize fsc;
   1435     uintptr_t* start_gpr_reg;
   1436     uint32_t* start_fpr_reg;
   1437     uintptr_t* start_stack_arg;
   1438     uint32_t handle_scope_entries;
   1439     bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
   1440                                              &handle_scope_entries, &start_stack_arg,
   1441                                              &start_gpr_reg, &start_fpr_reg);
   1442 
   1443     handle_scope_->SetNumberOfReferences(handle_scope_entries);
   1444     jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
   1445 
    1446     // The JNI environment is always the first argument.
   1447     sm_.AdvancePointer(self->GetJniEnv());
   1448 
   1449     if (is_static) {
   1450       sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
   1451     }
   1452   }
   1453 
   1454   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
   1455 
   1456   void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1457 
   1458   StackReference<mirror::Object>* GetFirstHandleScopeEntry()
   1459       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1460     return handle_scope_->GetHandle(0).GetReference();
   1461   }
   1462 
   1463   jobject GetFirstHandleScopeJObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1464     return handle_scope_->GetHandle(0).ToJObject();
   1465   }
   1466 
   1467   void* GetBottomOfUsedArea() {
   1468     return bottom_of_used_area_;
   1469   }
   1470 
   1471  private:
   1472   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
   1473   class FillJniCall FINAL : public FillNativeCall {
   1474    public:
   1475     FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
   1476                 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
   1477                                              handle_scope_(handle_scope), cur_entry_(0) {}
   1478 
   1479     uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1480 
   1481     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
   1482       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
   1483       handle_scope_ = scope;
   1484       cur_entry_ = 0U;
   1485     }
   1486 
   1487     void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1488       // Initialize padding entries.
   1489       size_t expected_slots = handle_scope_->NumberOfReferences();
   1490       while (cur_entry_ < expected_slots) {
   1491         handle_scope_->GetHandle(cur_entry_++).Assign(nullptr);
   1492       }
   1493       DCHECK_NE(cur_entry_, 0U);
   1494     }
   1495 
   1496    private:
   1497     HandleScope* handle_scope_;
   1498     size_t cur_entry_;
   1499   };
   1500 
   1501   HandleScope* handle_scope_;
   1502   FillJniCall jni_call_;
   1503   void* bottom_of_used_area_;
   1504 
   1505   BuildNativeCallFrameStateMachine<FillJniCall> sm_;
   1506 
   1507   DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
   1508 };
   1509 
   1510 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
   1511   uintptr_t tmp;
   1512   Handle<mirror::Object> h = handle_scope_->GetHandle(cur_entry_);
   1513   h.Assign(ref);
   1514   tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
   1515   cur_entry_++;
   1516   return tmp;
   1517 }
   1518 
   1519 void BuildGenericJniFrameVisitor::Visit() {
   1520   Primitive::Type type = GetParamPrimitiveType();
   1521   switch (type) {
   1522     case Primitive::kPrimLong: {
   1523       jlong long_arg;
   1524       if (IsSplitLongOrDouble()) {
   1525         long_arg = ReadSplitLongParam();
   1526       } else {
   1527         long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
   1528       }
   1529       sm_.AdvanceLong(long_arg);
   1530       break;
   1531     }
   1532     case Primitive::kPrimDouble: {
   1533       uint64_t double_arg;
   1534       if (IsSplitLongOrDouble()) {
    1535         // Read the raw bits so that we don't cast to a double.
   1536         double_arg = ReadSplitLongParam();
   1537       } else {
   1538         double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
   1539       }
   1540       sm_.AdvanceDouble(double_arg);
   1541       break;
   1542     }
   1543     case Primitive::kPrimNot: {
   1544       StackReference<mirror::Object>* stack_ref =
   1545           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
   1546       sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
   1547       break;
   1548     }
   1549     case Primitive::kPrimFloat:
   1550       sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
   1551       break;
   1552     case Primitive::kPrimBoolean:  // Fall-through.
   1553     case Primitive::kPrimByte:     // Fall-through.
   1554     case Primitive::kPrimChar:     // Fall-through.
   1555     case Primitive::kPrimShort:    // Fall-through.
   1556     case Primitive::kPrimInt:      // Fall-through.
   1557       sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
   1558       break;
   1559     case Primitive::kPrimVoid:
   1560       LOG(FATAL) << "UNREACHABLE";
   1561       break;
   1562   }
   1563 }
   1564 
   1565 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
   1566   // Clear out rest of the scope.
   1567   jni_call_.ResetRemainingScopeSlots();
   1568   // Install HandleScope.
   1569   self->PushHandleScope(handle_scope_);
   1570 }
   1571 
   1572 #if defined(__arm__) || defined(__aarch64__)
   1573 extern "C" void* artFindNativeMethod();
   1574 #else
   1575 extern "C" void* artFindNativeMethod(Thread* self);
   1576 #endif
   1577 
   1578 uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
   1579   if (lock != nullptr) {
   1580     return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
   1581   } else {
   1582     return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
   1583   }
   1584 }
   1585 
   1586 void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
   1587   if (lock != nullptr) {
   1588     JniMethodEndSynchronized(cookie, lock, self);
   1589   } else {
   1590     JniMethodEnd(cookie, self);
   1591   }
   1592 }
   1593 
   1594 /*
   1595  * Initializes an alloca region assumed to be directly below sp for a native call:
    1596  * Create a HandleScope and a call stack, and fill a mini stack with values to be pushed to
    1597  * registers.
    1598  *
    1599  * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
    1600  * We need to fix this, as the handle scope needs to go into the callee-save frame.
    1601  *
    1602  * The return value of this function denotes:
    1603  * 1) On success, the bottom of the used alloca area (hi) and the native code to invoke (lo).
    1604  * 2) On failure (an exception is pending), the two-word failure value.
   1605  */
   1606 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
   1607                                                       StackReference<mirror::ArtMethod>* sp)
   1608     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1609   mirror::ArtMethod* called = sp->AsMirrorPtr();
   1610   DCHECK(called->IsNative()) << PrettyMethod(called, true);
   1611   uint32_t shorty_len = 0;
   1612   const char* shorty = called->GetShorty(&shorty_len);
   1613 
   1614   // Run the visitor.
   1615   BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
   1616   visitor.VisitArguments();
   1617   visitor.FinalizeHandleScope(self);
   1618 
    1619   // Fix up managed-stack bookkeeping in the Thread: record sp as the top quick frame.
   1620   self->SetTopOfStack(sp, 0);
   1621 
   1622   self->VerifyStack();
   1623 
   1624   // Start JNI, save the cookie.
   1625   uint32_t cookie;
   1626   if (called->IsSynchronized()) {
   1627     cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
   1628     if (self->IsExceptionPending()) {
   1629       self->PopHandleScope();
    1630       // The two-word failure value denotes an error.
   1631       return GetTwoWordFailureValue();
   1632     }
   1633   } else {
   1634     cookie = JniMethodStart(self);
   1635   }
   1636   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   1637   *(sp32 - 1) = cookie;
   1638 
   1639   // Retrieve the stored native code.
   1640   const void* nativeCode = called->GetNativeMethod();
   1641 
   1642   // There are two cases for the content of nativeCode:
   1643   // 1) Pointer to the native function.
   1644   // 2) Pointer to the trampoline for native code binding.
   1645   // In the second case, we need to execute the binding and continue with the actual native function
   1646   // pointer.
   1647   DCHECK(nativeCode != nullptr);
   1648   if (nativeCode == GetJniDlsymLookupStub()) {
   1649 #if defined(__arm__) || defined(__aarch64__)
   1650     nativeCode = artFindNativeMethod();
   1651 #else
   1652     nativeCode = artFindNativeMethod(self);
   1653 #endif
   1654 
   1655     if (nativeCode == nullptr) {
   1656       DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
   1657 
   1658       // End JNI, as the assembly will move to deliver the exception.
   1659       jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
   1660       if (shorty[0] == 'L') {
   1661         artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
   1662       } else {
   1663         artQuickGenericJniEndJNINonRef(self, cookie, lock);
   1664       }
   1665 
   1666       return GetTwoWordFailureValue();
   1667     }
   1668     // Note that the native code pointer will be automatically set by artFindNativeMethod().
   1669   }
   1670 
    1671   // Return the native code address (lo) and the bottom of the alloca'd area (hi).
   1672   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
   1673                                 reinterpret_cast<uintptr_t>(nativeCode));
   1674 }
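         // Rough picture of how the per-ISA assembly stub is expected to use the value returned above
         // (sketch only; the real stub is written in assembly):
         //
         //   1. Call artQuickGenericJniTrampoline(self, sp); bail out on the failure value.
         //   2. Release the unused part of the alloca down to the returned bottom (hi word).
         //   3. Load the prepared GPR/FPR/stack arguments and call the native code (lo word).
         //   4. Hand the raw result to artQuickGenericJniEndTrampoline for unlocking and reference
         //      conversion (see below).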
   1675 
   1676 /*
   1677  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
   1678  * unlocking.
   1679  */
   1680 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
   1681     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1682   StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
   1683   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   1684   mirror::ArtMethod* called = sp->AsMirrorPtr();
   1685   uint32_t cookie = *(sp32 - 1);
   1686 
   1687   jobject lock = nullptr;
   1688   if (called->IsSynchronized()) {
   1689     HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
   1690         + sizeof(StackReference<mirror::ArtMethod>));
   1691     lock = table->GetHandle(0).ToJObject();
   1692   }
   1693 
   1694   char return_shorty_char = called->GetShorty()[0];
   1695 
   1696   if (return_shorty_char == 'L') {
   1697     return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
   1698   } else {
   1699     artQuickGenericJniEndJNINonRef(self, cookie, lock);
   1700 
   1701     switch (return_shorty_char) {
   1702       case 'F': {
   1703         if (kRuntimeISA == kX86) {
    1704           // Convert the result back to a float.
   1705           double d = bit_cast<uint64_t, double>(result_f);
   1706           return bit_cast<float, uint32_t>(static_cast<float>(d));
   1707         } else {
   1708           return result_f;
   1709         }
   1710       }
   1711       case 'D':
   1712         return result_f;
   1713       case 'Z':
   1714         return result.z;
   1715       case 'B':
   1716         return result.b;
   1717       case 'C':
   1718         return result.c;
   1719       case 'S':
   1720         return result.s;
   1721       case 'I':
   1722         return result.i;
   1723       case 'J':
   1724         return result.j;
   1725       case 'V':
   1726         return 0;
   1727       default:
   1728         LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
   1729         return 0;
   1730     }
   1731   }
   1732 }
   1733 
   1734 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
   1735 // for the method pointer.
   1736 //
    1737 // It is valid to use this, as at the usage points here (returns from C functions) we are assumed
    1738 // to hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
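         // Sketch of the packing this relies on (illustrative; the actual TwoWordReturn definition and the
         // GetTwoWordSuccessValue()/GetTwoWordFailureValue() helpers live elsewhere in the runtime). On a
         // 32-bit ISA the two words can simply be packed into a 64-bit value, e.g. with a hypothetical
         // helper like:
         //
         //   static inline uint64_t PackTwoWords(uintptr_t hi, uintptr_t lo) {
         //     return (static_cast<uint64_t>(hi) << 32) | lo;
         //   }
         //
         // so that the assembly caller receives the pair in a register pair, while 64-bit ISAs can return
         // the two words in two registers directly.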
   1739 
   1740 template<InvokeType type, bool access_check>
   1741 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
   1742                                      mirror::ArtMethod* caller_method,
   1743                                      Thread* self, StackReference<mirror::ArtMethod>* sp);
   1744 
   1745 template<InvokeType type, bool access_check>
   1746 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
   1747                                      mirror::ArtMethod* caller_method,
   1748                                      Thread* self, StackReference<mirror::ArtMethod>* sp) {
   1749   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
   1750                                              type);
   1751   if (UNLIKELY(method == nullptr)) {
   1752     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1753     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
   1754     uint32_t shorty_len;
   1755     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
   1756     {
   1757       // Remember the args in case a GC happens in FindMethodFromCode.
   1758       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   1759       RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
   1760       visitor.VisitArguments();
   1761       method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
   1762                                                       self);
   1763       visitor.FixupReferences();
   1764     }
   1765 
    1766     if (UNLIKELY(method == nullptr)) {
   1767       CHECK(self->IsExceptionPending());
   1768       return GetTwoWordFailureValue();  // Failure.
   1769     }
   1770   }
   1771   DCHECK(!self->IsExceptionPending());
   1772   const void* code = method->GetEntryPointFromQuickCompiledCode();
   1773 
   1774   // When we return, the caller will branch to this address, so it had better not be 0!
   1775   DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
   1776                           << " location: "
   1777                           << method->GetDexFile()->GetLocation();
   1778 
   1779   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   1780                                 reinterpret_cast<uintptr_t>(method));
   1781 }
   1782 
   1783 // Explicit artInvokeCommon template function declarations to please analysis tool.
   1784 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
   1785   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
   1786   TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
   1787                                                     mirror::Object* this_object,                \
   1788                                                     mirror::ArtMethod* caller_method,           \
   1789                                                     Thread* self,                               \
   1790                                                     StackReference<mirror::ArtMethod>* sp)      \
   1791 
   1792 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
   1793 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
   1794 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
   1795 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
   1796 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
   1797 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
   1798 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
   1799 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
   1800 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
   1801 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
   1802 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
   1803 
   1804 // See comments in runtime_support_asm.S
   1805 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
   1806     uint32_t method_idx, mirror::Object* this_object,
   1807     mirror::ArtMethod* caller_method, Thread* self,
   1808     StackReference<mirror::ArtMethod>* sp)
   1809         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1810   return artInvokeCommon<kInterface, true>(method_idx, this_object,
   1811                                            caller_method, self, sp);
   1812 }
   1813 
   1814 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
   1815     uint32_t method_idx, mirror::Object* this_object,
   1816     mirror::ArtMethod* caller_method, Thread* self,
   1817     StackReference<mirror::ArtMethod>* sp)
   1818         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1819   return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
   1820                                         self, sp);
   1821 }
   1822 
   1823 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
   1824     uint32_t method_idx, mirror::Object* this_object,
   1825     mirror::ArtMethod* caller_method, Thread* self,
   1826     StackReference<mirror::ArtMethod>* sp)
   1827         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1828   return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
   1829                                         self, sp);
   1830 }
   1831 
   1832 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
   1833     uint32_t method_idx, mirror::Object* this_object,
   1834     mirror::ArtMethod* caller_method, Thread* self,
   1835     StackReference<mirror::ArtMethod>* sp)
   1836         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1837   return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
   1838                                        self, sp);
   1839 }
   1840 
   1841 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
   1842     uint32_t method_idx, mirror::Object* this_object,
   1843     mirror::ArtMethod* caller_method, Thread* self,
   1844     StackReference<mirror::ArtMethod>* sp)
   1845         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1846   return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
   1847                                          self, sp);
   1848 }
   1849 
    1850 // Determine the target of interface dispatch. The "this" object is known to be non-null.
   1851 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
   1852                                                       mirror::Object* this_object,
   1853                                                       mirror::ArtMethod* caller_method,
   1854                                                       Thread* self,
   1855                                                       StackReference<mirror::ArtMethod>* sp)
   1856     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1857   mirror::ArtMethod* method;
   1858   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
   1859     method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
    1860     if (UNLIKELY(method == nullptr)) {
   1861       FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1862       ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
   1863                                                                  caller_method);
   1864       return GetTwoWordFailureValue();  // Failure.
   1865     }
   1866   } else {
   1867     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1868     DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
   1869 
   1870     // Find the caller PC.
   1871     constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
   1872     uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);
   1873 
   1874     // Map the caller PC to a dex PC.
   1875     uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
   1876     const DexFile::CodeItem* code = caller_method->GetCodeItem();
   1877     CHECK_LT(dex_pc, code->insns_size_in_code_units_);
   1878     const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
   1879     Instruction::Code instr_code = instr->Opcode();
   1880     CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
   1881           instr_code == Instruction::INVOKE_INTERFACE_RANGE)
    1882         << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
   1883     uint32_t dex_method_idx;
   1884     if (instr_code == Instruction::INVOKE_INTERFACE) {
   1885       dex_method_idx = instr->VRegB_35c();
   1886     } else {
   1887       DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
   1888       dex_method_idx = instr->VRegB_3rc();
   1889     }
   1890 
   1891     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
   1892         ->GetDexFile();
   1893     uint32_t shorty_len;
   1894     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
   1895                                                    &shorty_len);
   1896     {
   1897       // Remember the args in case a GC happens in FindMethodFromCode.
   1898       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   1899       RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
   1900       visitor.VisitArguments();
   1901       method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
   1902                                                      self);
   1903       visitor.FixupReferences();
   1904     }
   1905 
   1906     if (UNLIKELY(method == nullptr)) {
   1907       CHECK(self->IsExceptionPending());
   1908       return GetTwoWordFailureValue();  // Failure.
   1909     }
   1910   }
   1911   const void* code = method->GetEntryPointFromQuickCompiledCode();
   1912 
   1913   // When we return, the caller will branch to this address, so it had better not be 0!
   1914   DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
   1915                           << " location: " << method->GetDexFile()->GetLocation();
   1916 
   1917   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   1918                                 reinterpret_cast<uintptr_t>(method));
   1919 }
   1920 
   1921 }  // namespace art
   1922