      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "callee_save_frame.h"
     18 #include "common_throws.h"
     19 #include "dex_file-inl.h"
     20 #include "dex_instruction-inl.h"
     21 #include "entrypoints/entrypoint_utils-inl.h"
     22 #include "gc/accounting/card_table-inl.h"
     23 #include "instruction_set.h"
     24 #include "interpreter/interpreter.h"
     25 #include "mirror/art_method-inl.h"
     26 #include "mirror/class-inl.h"
     27 #include "mirror/dex_cache-inl.h"
     28 #include "mirror/object-inl.h"
     29 #include "mirror/object_array-inl.h"
     30 #include "runtime.h"
     31 #include "scoped_thread_state_change.h"
     32 
     33 namespace art {
     34 
     35 // Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
     36 class QuickArgumentVisitor {
     37   // Number of bytes for each out register in the caller method's frame.
     38   static constexpr size_t kBytesStackArgLocation = 4;
     39   // Frame size in bytes of a callee-save frame for RefsAndArgs.
     40   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
     41       GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
     42 #if defined(__arm__)
     43   // The callee save frame is pointed to by SP.
     44   // | argN       |  |
     45   // | ...        |  |
     46   // | arg4       |  |
     47   // | arg3 spill |  |  Caller's frame
     48   // | arg2 spill |  |
     49   // | arg1 spill |  |
     50   // | Method*    | ---
     51   // | LR         |
     52   // | ...        |    callee saves
     53   // | R3         |    arg3
     54   // | R2         |    arg2
     55   // | R1         |    arg1
     56   // | R0         |    padding
     57   // | Method*    |  <- sp
     58   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
     59   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
     60   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
     61   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
     62       arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
     63   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
     64       arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
     65   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
     66       arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
     67   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     68     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
     69   }
     70 #elif defined(__aarch64__)
     71   // The callee save frame is pointed to by SP.
     72   // | argN       |  |
     73   // | ...        |  |
     74   // | arg4       |  |
     75   // | arg3 spill |  |  Caller's frame
     76   // | arg2 spill |  |
     77   // | arg1 spill |  |
     78   // | Method*    | ---
     79   // | LR         |
     80   // | X29        |
     81   // |  :         |
     82   // | X20        |
     83   // | X7         |
     84   // | :          |
     85   // | X1         |
     86   // | D7         |
     87   // |  :         |
     88   // | D0         |
     89   // |            |    padding
     90   // | Method*    |  <- sp
     91   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
     92   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
     93   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
     94   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
     95       arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs);  // Offset of first FPR arg.
     96   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
     97       arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs);  // Offset of first GPR arg.
     98   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
     99       arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs);  // Offset of return address.
    100   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    101     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    102   }
    103 #elif defined(__mips__)
    104   // The callee save frame is pointed to by SP.
    105   // | argN       |  |
    106   // | ...        |  |
    107   // | arg4       |  |
    108   // | arg3 spill |  |  Caller's frame
    109   // | arg2 spill |  |
    110   // | arg1 spill |  |
    111   // | Method*    | ---
    112   // | RA         |
    113   // | ...        |    callee saves
    114   // | A3         |    arg3
    115   // | A2         |    arg2
    116   // | A1         |    arg1
    117   // | A0/Method* |  <- sp
    118   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
    119   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
    120   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
    121   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
    122   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
    123   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60;  // Offset of return address.
    124   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    125     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    126   }
    127 #elif defined(__i386__)
    128   // The callee save frame is pointed to by SP.
    129   // | argN        |  |
    130   // | ...         |  |
    131   // | arg4        |  |
    132   // | arg3 spill  |  |  Caller's frame
    133   // | arg2 spill  |  |
    134   // | arg1 spill  |  |
    135   // | Method*     | ---
    136   // | Return      |
    137   // | EBP,ESI,EDI |    callee saves
    138   // | EBX         |    arg3
    139   // | EDX         |    arg2
    140   // | ECX         |    arg1
    141   // | EAX/Method* |  <- sp
    142   static constexpr bool kQuickSoftFloatAbi = true;  // This is a soft float ABI.
    143   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
    144   static constexpr size_t kNumQuickFprArgs = 0;  // 0 arguments passed in FPRs.
    145   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0;  // Offset of first FPR arg.
    146   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4;  // Offset of first GPR arg.
    147   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28;  // Offset of return address.
    148   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
    149     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
    150   }
    151 #elif defined(__x86_64__)
    152   // The callee save frame is pointed to by SP.
    153   // | argN            |  |
    154   // | ...             |  |
    155   // | reg. arg spills |  |  Caller's frame
    156   // | Method*         | ---
    157   // | Return          |
    158   // | R15             |    callee save
    159   // | R14             |    callee save
    160   // | R13             |    callee save
    161   // | R12             |    callee save
    162   // | R9              |    arg5
    163   // | R8              |    arg4
    164   // | RSI/R6          |    arg1
    165   // | RBP/R5          |    callee save
    166   // | RBX/R3          |    callee save
    167   // | RDX/R2          |    arg2
    168   // | RCX/R1          |    arg3
    169   // | XMM7            |    float arg 8
    170   // | XMM6            |    float arg 7
    171   // | XMM5            |    float arg 6
    172   // | XMM4            |    float arg 5
    173   // | XMM3            |    float arg 4
    174   // | XMM2            |    float arg 3
    175   // | XMM1            |    float arg 2
    176   // | XMM0            |    float arg 1
    177   // | Padding         |
    178   // | RDI/Method*     |  <- sp
    179   static constexpr bool kQuickSoftFloatAbi = false;  // This is a hard float ABI.
    180   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
    181   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
    182   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
    183   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
    184   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
    185   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
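             // The argument order is RSI, RDX, RCX, R8, R9, but the spill slots starting at Gpr1Offset
             // are laid out RCX, RDX, RBX, RBP, RSI, R8, R9 (see the frame diagram above), so the
             // argument index has to be remapped to its spill slot.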
    186     switch (gpr_index) {
    187       case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
    188       case 1: return (1 * GetBytesPerGprSpillLocation(kRuntimeISA));
    189       case 2: return (0 * GetBytesPerGprSpillLocation(kRuntimeISA));
    190       case 3: return (5 * GetBytesPerGprSpillLocation(kRuntimeISA));
    191       case 4: return (6 * GetBytesPerGprSpillLocation(kRuntimeISA));
    192       default:
    193       LOG(FATAL) << "Unexpected GPR index: " << gpr_index;
    194       return 0;
    195     }
    196   }
    197 #else
    198 #error "Unsupported architecture"
    199 #endif
    200 
    201  public:
    202   // Special handling for proxy methods. Proxy methods are instance methods so the
    203   // 'this' object is the 1st argument. They also have the same frame layout as the
    204   // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
    205   // 1st GPR.
    206   static mirror::Object* GetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
    207       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    208     CHECK(sp->AsMirrorPtr()->IsProxyMethod());
    209     CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, sp->AsMirrorPtr()->GetFrameSizeInBytes());
    210     CHECK_GT(kNumQuickGprArgs, 0u);
    211     constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
    212     size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
    213         GprIndexToGprOffset(kThisGprIndex);
    214     uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
    215     return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
    216   }
    217 
    218   static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
    219       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    220     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    221     byte* previous_sp = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
    222     return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
    223   }
    224 
    225   // For the given quick ref and args quick frame, return the caller's PC.
    226   static uintptr_t GetCallingPc(StackReference<mirror::ArtMethod>* sp)
    227       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    228     DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
    229     byte* lr = reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
    230     return *reinterpret_cast<uintptr_t*>(lr);
    231   }
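
           // Illustrative sketch only (hypothetical numbers, not part of the runtime): on a 32-bit
           // target whose RefsAndArgs frame were 64 bytes with the return address spilled 60 bytes
           // above SP, the two helpers above would reduce to plain pointer arithmetic:
           //   byte* frame = reinterpret_cast<byte*>(sp);
           //   mirror::ArtMethod* caller =
           //       reinterpret_cast<StackReference<mirror::ArtMethod>*>(frame + 64)->AsMirrorPtr();
           //   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(frame + 60);
           // The real values come from the per-architecture offset constants defined above.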
    232 
    233   QuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static, const char* shorty,
    234                        uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
    235           is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
    236           gpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
    237           fpr_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
    238           stack_args_(reinterpret_cast<byte*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
    239                       + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
    240           gpr_index_(0), fpr_index_(0), stack_index_(0), cur_type_(Primitive::kPrimVoid),
    241           is_split_long_or_double_(false) {}
    242 
    243   virtual ~QuickArgumentVisitor() {}
    244 
    245   virtual void Visit() = 0;
    246 
    247   Primitive::Type GetParamPrimitiveType() const {
    248     return cur_type_;
    249   }
    250 
    251   byte* GetParamAddress() const {
    252     if (!kQuickSoftFloatAbi) {
    253       Primitive::Type type = GetParamPrimitiveType();
    254       if (UNLIKELY((type == Primitive::kPrimDouble) || (type == Primitive::kPrimFloat))) {
    255         if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    256           return fpr_args_ + (fpr_index_ * GetBytesPerFprSpillLocation(kRuntimeISA));
    257         }
    258         return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    259       }
    260     }
    261     if (gpr_index_ < kNumQuickGprArgs) {
    262       return gpr_args_ + GprIndexToGprOffset(gpr_index_);
    263     }
    264     return stack_args_ + (stack_index_ * kBytesStackArgLocation);
    265   }
    266 
    267   bool IsSplitLongOrDouble() const {
    268     if ((GetBytesPerGprSpillLocation(kRuntimeISA) == 4) || (GetBytesPerFprSpillLocation(kRuntimeISA) == 4)) {
    269       return is_split_long_or_double_;
    270     } else {
    271       return false;  // An optimization for when GPR and FPRs are 64bit.
    272     }
    273   }
    274 
    275   bool IsParamAReference() const {
    276     return GetParamPrimitiveType() == Primitive::kPrimNot;
    277   }
    278 
    279   bool IsParamALongOrDouble() const {
    280     Primitive::Type type = GetParamPrimitiveType();
    281     return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
    282   }
    283 
    284   uint64_t ReadSplitLongParam() const {
    285     DCHECK(IsSplitLongOrDouble());
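             // Only possible on 32-bit targets: the low half of the value sits in the last argument
             // GPR, while the high half is the first slot of the caller's out-args area (stack_index_
             // has not yet advanced at this point, so stack_args_ points directly at it).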
    286     uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
    287     uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
    288     return (low_half & 0xffffffffULL) | (high_half << 32);
    289   }
    290 
    291   void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     292     // This implementation doesn't support a reg-spill area for hard float
     293     // ABI targets such as x86_64 and aarch64. So, for those targets, whose
     294     // 'kQuickSoftFloatAbi' is 'false':
     295     //     (a) 'stack_args_' should point to the method's first argument, and
     296     //     (b) whatever the argument type, 'stack_index_' should be advanced
     297     //         on every visit.
    298     gpr_index_ = 0;
    299     fpr_index_ = 0;
    300     stack_index_ = 0;
    301     if (!is_static_) {  // Handle this.
    302       cur_type_ = Primitive::kPrimNot;
    303       is_split_long_or_double_ = false;
    304       Visit();
    305       if (!kQuickSoftFloatAbi || kNumQuickGprArgs == 0) {
    306         stack_index_++;
    307       }
    308       if (kNumQuickGprArgs > 0) {
    309         gpr_index_++;
    310       }
    311     }
    312     for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
    313       cur_type_ = Primitive::GetType(shorty_[shorty_index]);
    314       switch (cur_type_) {
    315         case Primitive::kPrimNot:
    316         case Primitive::kPrimBoolean:
    317         case Primitive::kPrimByte:
    318         case Primitive::kPrimChar:
    319         case Primitive::kPrimShort:
    320         case Primitive::kPrimInt:
    321           is_split_long_or_double_ = false;
    322           Visit();
    323           if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
    324             stack_index_++;
    325           }
    326           if (gpr_index_ < kNumQuickGprArgs) {
    327             gpr_index_++;
    328           }
    329           break;
    330         case Primitive::kPrimFloat:
    331           is_split_long_or_double_ = false;
    332           Visit();
    333           if (kQuickSoftFloatAbi) {
    334             if (gpr_index_ < kNumQuickGprArgs) {
    335               gpr_index_++;
    336             } else {
    337               stack_index_++;
    338             }
    339           } else {
    340             if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    341               fpr_index_++;
    342             }
    343             stack_index_++;
    344           }
    345           break;
    346         case Primitive::kPrimDouble:
    347         case Primitive::kPrimLong:
    348           if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
    349             is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
    350                 ((gpr_index_ + 1) == kNumQuickGprArgs);
    351             Visit();
    352             if (!kQuickSoftFloatAbi || kNumQuickGprArgs == gpr_index_) {
    353               if (kBytesStackArgLocation == 4) {
    354                 stack_index_+= 2;
    355               } else {
    356                 CHECK_EQ(kBytesStackArgLocation, 8U);
    357                 stack_index_++;
    358               }
    359             }
    360             if (gpr_index_ < kNumQuickGprArgs) {
    361               gpr_index_++;
    362               if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
    363                 if (gpr_index_ < kNumQuickGprArgs) {
    364                   gpr_index_++;
    365                 } else if (kQuickSoftFloatAbi) {
    366                   stack_index_++;
    367                 }
    368               }
    369             }
    370           } else {
    371             is_split_long_or_double_ = (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) &&
    372                 ((fpr_index_ + 1) == kNumQuickFprArgs);
    373             Visit();
    374             if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    375               fpr_index_++;
    376               if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
    377                 if ((kNumQuickFprArgs != 0) && (fpr_index_ + 1 < kNumQuickFprArgs + 1)) {
    378                   fpr_index_++;
    379                 }
    380               }
    381             }
    382             if (kBytesStackArgLocation == 4) {
    383               stack_index_+= 2;
    384             } else {
    385               CHECK_EQ(kBytesStackArgLocation, 8U);
    386               stack_index_++;
    387             }
    388           }
    389           break;
    390         default:
    391           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty_;
    392       }
    393     }
    394   }
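
           // Illustrative walk-through, assuming the 32-bit soft-float layout above (3 argument GPRs,
           // 4-byte spill slots): for a non-static method with shorty "VIJ", 'this' lands in the 1st
           // GPR, the int in the 2nd GPR, and the long is split: its low half goes in the 3rd GPR and
           // its high half in the first out-args slot of the caller's frame (IsSplitLongOrDouble() is
           // true for that argument). On 64-bit hard-float targets stack_index_ also advances for
           // every argument, per the comment at the top of this method.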
    395 
    396  private:
    397   static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
    398                                              uint32_t shorty_len) {
    399     if (kQuickSoftFloatAbi) {
    400       CHECK_EQ(kNumQuickFprArgs, 0U);
    401       return (kNumQuickGprArgs * GetBytesPerGprSpillLocation(kRuntimeISA))
    402           + sizeof(StackReference<mirror::ArtMethod>) /* StackReference<ArtMethod> */;
    403     } else {
     404       // For now, there is no reg-spill area for the targets with a
     405       // hard float ABI. So, the offset pointing to the method's first
     406       // parameter ('this' for non-static methods) should be returned.
    407       return sizeof(StackReference<mirror::ArtMethod>);  // Skip StackReference<ArtMethod>.
    408     }
    409   }
    410 
    411  protected:
    412   const bool is_static_;
    413   const char* const shorty_;
    414   const uint32_t shorty_len_;
    415 
    416  private:
    417   byte* const gpr_args_;  // Address of GPR arguments in callee save frame.
    418   byte* const fpr_args_;  // Address of FPR arguments in callee save frame.
    419   byte* const stack_args_;  // Address of stack arguments in caller's frame.
    420   uint32_t gpr_index_;  // Index into spilled GPRs.
    421   uint32_t fpr_index_;  // Index into spilled FPRs.
    422   uint32_t stack_index_;  // Index into arguments on the stack.
    423   // The current type of argument during VisitArguments.
    424   Primitive::Type cur_type_;
    425   // Does a 64bit parameter straddle the register and stack arguments?
    426   bool is_split_long_or_double_;
    427 };
    428 
     429 // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
     430 // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
    431 extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
    432     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    433   return QuickArgumentVisitor::GetProxyThisObject(sp);
    434 }
    435 
    436 // Visits arguments on the stack placing them into the shadow frame.
    437 class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
    438  public:
    439   BuildQuickShadowFrameVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    440                                const char* shorty, uint32_t shorty_len, ShadowFrame* sf,
    441                                size_t first_arg_reg) :
    442       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
    443 
    444   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    445 
    446  private:
    447   ShadowFrame* const sf_;
    448   uint32_t cur_reg_;
    449 
    450   DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
    451 };
    452 
    453 void BuildQuickShadowFrameVisitor::Visit() {
    454   Primitive::Type type = GetParamPrimitiveType();
    455   switch (type) {
    456     case Primitive::kPrimLong:  // Fall-through.
    457     case Primitive::kPrimDouble:
    458       if (IsSplitLongOrDouble()) {
    459         sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
    460       } else {
    461         sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
    462       }
    463       ++cur_reg_;
    464       break;
    465     case Primitive::kPrimNot: {
    466         StackReference<mirror::Object>* stack_ref =
    467             reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    468         sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
    469       }
    470       break;
    471     case Primitive::kPrimBoolean:  // Fall-through.
    472     case Primitive::kPrimByte:     // Fall-through.
    473     case Primitive::kPrimChar:     // Fall-through.
    474     case Primitive::kPrimShort:    // Fall-through.
    475     case Primitive::kPrimInt:      // Fall-through.
    476     case Primitive::kPrimFloat:
    477       sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
    478       break;
    479     case Primitive::kPrimVoid:
    480       LOG(FATAL) << "UNREACHABLE";
    481       break;
    482   }
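           // Every argument advances at least one vreg here; longs and doubles already advanced one
           // extra vreg in their case above, so they end up occupying two vregs as required.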
    483   ++cur_reg_;
    484 }
    485 
    486 extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    487                                                 StackReference<mirror::ArtMethod>* sp)
    488     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    489   // Ensure we don't get thread suspension until the object arguments are safely in the shadow
    490   // frame.
    491   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    492 
    493   if (method->IsAbstract()) {
    494     ThrowAbstractMethodError(method);
    495     return 0;
    496   } else {
    497     DCHECK(!method->IsNative()) << PrettyMethod(method);
    498     const char* old_cause = self->StartAssertNoThreadSuspension(
    499         "Building interpreter shadow frame");
    500     const DexFile::CodeItem* code_item = method->GetCodeItem();
    501     DCHECK(code_item != nullptr) << PrettyMethod(method);
    502     uint16_t num_regs = code_item->registers_size_;
    503     void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
    504     // No last shadow coming from quick.
    505     ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, nullptr, method, 0, memory));
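             // Dex 'ins' (the incoming arguments, including 'this' for instance methods) occupy the
             // highest-numbered registers of the frame, hence registers_size_ - ins_size_.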
    506     size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
    507     uint32_t shorty_len = 0;
    508     const char* shorty = method->GetShorty(&shorty_len);
    509     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
    510                                                       shadow_frame, first_arg_reg);
    511     shadow_frame_builder.VisitArguments();
    512     // Push a transition back into managed code onto the linked list in thread.
    513     ManagedStack fragment;
    514     self->PushManagedStackFragment(&fragment);
    515     self->PushShadowFrame(shadow_frame);
    516     self->EndAssertNoThreadSuspension(old_cause);
    517 
    518     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
    519       // Ensure static method's class is initialized.
    520       StackHandleScope<1> hs(self);
    521       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
    522       if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
    523         DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
    524         self->PopManagedStackFragment(fragment);
    525         return 0;
    526       }
    527     }
    528 
    529     StackHandleScope<1> hs(self);
    530     MethodHelper mh(hs.NewHandle(method));
    531     JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
    532     // Pop transition.
    533     self->PopManagedStackFragment(fragment);
    534     // No need to restore the args since the method has already been run by the interpreter.
    535     return result.GetJ();
    536   }
    537 }
    538 
     539 // Visits arguments on the stack, placing them into the args vector; Object* arguments are
     540 // converted to jobjects.
    541 class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
    542  public:
    543   BuildQuickArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    544                             const char* shorty, uint32_t shorty_len,
    545                             ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
    546       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
    547 
    548   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    549 
    550   void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
    551 
    552  private:
    553   ScopedObjectAccessUnchecked* const soa_;
    554   std::vector<jvalue>* const args_;
    555   // References which we must update when exiting in case the GC moved the objects.
    556   std::vector<std::pair<jobject, StackReference<mirror::Object>*>> references_;
    557 
    558   DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
    559 };
    560 
    561 void BuildQuickArgumentVisitor::Visit() {
    562   jvalue val;
    563   Primitive::Type type = GetParamPrimitiveType();
    564   switch (type) {
    565     case Primitive::kPrimNot: {
    566       StackReference<mirror::Object>* stack_ref =
    567           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    568       val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    569       references_.push_back(std::make_pair(val.l, stack_ref));
    570       break;
    571     }
    572     case Primitive::kPrimLong:  // Fall-through.
    573     case Primitive::kPrimDouble:
    574       if (IsSplitLongOrDouble()) {
    575         val.j = ReadSplitLongParam();
    576       } else {
    577         val.j = *reinterpret_cast<jlong*>(GetParamAddress());
    578       }
    579       break;
    580     case Primitive::kPrimBoolean:  // Fall-through.
    581     case Primitive::kPrimByte:     // Fall-through.
    582     case Primitive::kPrimChar:     // Fall-through.
    583     case Primitive::kPrimShort:    // Fall-through.
    584     case Primitive::kPrimInt:      // Fall-through.
    585     case Primitive::kPrimFloat:
    586       val.i = *reinterpret_cast<jint*>(GetParamAddress());
    587       break;
    588     case Primitive::kPrimVoid:
    589       LOG(FATAL) << "UNREACHABLE";
    590       val.j = 0;
    591       break;
    592   }
    593   args_->push_back(val);
    594 }
    595 
    596 void BuildQuickArgumentVisitor::FixupReferences() {
    597   // Fixup any references which may have changed.
    598   for (const auto& pair : references_) {
    599     pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    600     soa_->Env()->DeleteLocalRef(pair.first);
    601   }
    602 }
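
         // Typical usage pattern (see artQuickProxyInvokeHandler below): build the jvalue vector while
         // thread suspension is disallowed, make the call that may allocate and move objects, then call
         // FixupReferences() so that any moved objects are written back into the quick frame:
         //
         //   BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
         //   local_ref_visitor.VisitArguments();
         //   ...  // invoke code that may trigger GC
         //   local_ref_visitor.FixupReferences();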
    603 
     604 // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method,
     605 // which is responsible for recording callee save registers. We explicitly place the incoming reference
     606 // arguments into jobjects (so they survive GC). We then invoke the invocation handler, a field within
     607 // the proxy object, which boxes the primitive arguments and deals with error cases.
    608 extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
    609                                                mirror::Object* receiver,
    610                                                Thread* self, StackReference<mirror::ArtMethod>* sp)
    611     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    612   DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
    613   DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
    614   // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
    615   const char* old_cause =
    616       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
     617   // Register the top of the managed stack, making the stack crawlable.
    618   DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
    619   self->SetTopOfStack(sp, 0);
    620   DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
    621             Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
    622       << PrettyMethod(proxy_method);
    623   self->VerifyStack();
    624   // Start new JNI local reference state.
    625   JNIEnvExt* env = self->GetJniEnv();
    626   ScopedObjectAccessUnchecked soa(env);
    627   ScopedJniEnvLocalRefState env_state(env);
    628   // Create local ref. copies of proxy method and the receiver.
    629   jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
    630 
     631   // Place the arguments into the args vector and remove the receiver.
    632   mirror::ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy();
    633   CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
    634                                        << PrettyMethod(non_proxy_method);
    635   std::vector<jvalue> args;
    636   uint32_t shorty_len = 0;
    637   const char* shorty = proxy_method->GetShorty(&shorty_len);
    638   BuildQuickArgumentVisitor local_ref_visitor(sp, false, shorty, shorty_len, &soa, &args);
    639 
    640   local_ref_visitor.VisitArguments();
    641   DCHECK_GT(args.size(), 0U) << PrettyMethod(proxy_method);
    642   args.erase(args.begin());
    643 
    644   // Convert proxy method into expected interface method.
    645   mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
    646   DCHECK(interface_method != NULL) << PrettyMethod(proxy_method);
    647   DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
    648   jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
    649 
     650   // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
    651   // that performs allocations.
    652   self->EndAssertNoThreadSuspension(old_cause);
    653   JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
    654   // Restore references which might have moved.
    655   local_ref_visitor.FixupReferences();
    656   return result.GetJ();
    657 }
    658 
     659 // Read object references held in arguments from quick frames and place them in JNI local
     660 // references so they don't get garbage collected.
    661 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
    662  public:
    663   RememberForGcArgumentVisitor(StackReference<mirror::ArtMethod>* sp, bool is_static,
    664                                const char* shorty, uint32_t shorty_len,
    665                                ScopedObjectAccessUnchecked* soa) :
    666       QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
    667 
    668   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
    669 
    670   void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
    671 
    672  private:
    673   ScopedObjectAccessUnchecked* const soa_;
    674   // References which we must update when exiting in case the GC moved the objects.
    675   std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
    676 
    677   DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
    678 };
    679 
    680 void RememberForGcArgumentVisitor::Visit() {
    681   if (IsParamAReference()) {
    682     StackReference<mirror::Object>* stack_ref =
    683         reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
    684     jobject reference =
    685         soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
    686     references_.push_back(std::make_pair(reference, stack_ref));
    687   }
    688 }
    689 
    690 void RememberForGcArgumentVisitor::FixupReferences() {
    691   // Fixup any references which may have changed.
    692   for (const auto& pair : references_) {
    693     pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
    694     soa_->Env()->DeleteLocalRef(pair.first);
    695   }
    696 }
    697 
    698 // Lazily resolve a method for quick. Called by stub code.
    699 extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
    700                                                     mirror::Object* receiver,
    701                                                     Thread* self,
    702                                                     StackReference<mirror::ArtMethod>* sp)
    703     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    704   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
    705   // Start new JNI local reference state
    706   JNIEnvExt* env = self->GetJniEnv();
    707   ScopedObjectAccessUnchecked soa(env);
    708   ScopedJniEnvLocalRefState env_state(env);
    709   const char* old_cause = self->StartAssertNoThreadSuspension("Quick method resolution set up");
    710 
    711   // Compute details about the called method (avoid GCs)
    712   ClassLinker* linker = Runtime::Current()->GetClassLinker();
    713   mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
    714   InvokeType invoke_type;
    715   const DexFile* dex_file;
    716   uint32_t dex_method_idx;
    717   if (called->IsRuntimeMethod()) {
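             // 'called' is the runtime resolution method, so we arrived here from an unresolved invoke
             // in compiled code: decode the invoke instruction at the caller's dex pc to recover the
             // invoke type and the target method index.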
    718     uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
    719     const DexFile::CodeItem* code;
    720     dex_file = caller->GetDexFile();
    721     code = caller->GetCodeItem();
    722     CHECK_LT(dex_pc, code->insns_size_in_code_units_);
    723     const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
    724     Instruction::Code instr_code = instr->Opcode();
    725     bool is_range;
    726     switch (instr_code) {
    727       case Instruction::INVOKE_DIRECT:
    728         invoke_type = kDirect;
    729         is_range = false;
    730         break;
    731       case Instruction::INVOKE_DIRECT_RANGE:
    732         invoke_type = kDirect;
    733         is_range = true;
    734         break;
    735       case Instruction::INVOKE_STATIC:
    736         invoke_type = kStatic;
    737         is_range = false;
    738         break;
    739       case Instruction::INVOKE_STATIC_RANGE:
    740         invoke_type = kStatic;
    741         is_range = true;
    742         break;
    743       case Instruction::INVOKE_SUPER:
    744         invoke_type = kSuper;
    745         is_range = false;
    746         break;
    747       case Instruction::INVOKE_SUPER_RANGE:
    748         invoke_type = kSuper;
    749         is_range = true;
    750         break;
    751       case Instruction::INVOKE_VIRTUAL:
    752         invoke_type = kVirtual;
    753         is_range = false;
    754         break;
    755       case Instruction::INVOKE_VIRTUAL_RANGE:
    756         invoke_type = kVirtual;
    757         is_range = true;
    758         break;
    759       case Instruction::INVOKE_INTERFACE:
    760         invoke_type = kInterface;
    761         is_range = false;
    762         break;
    763       case Instruction::INVOKE_INTERFACE_RANGE:
    764         invoke_type = kInterface;
    765         is_range = true;
    766         break;
    767       default:
    768         LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
    769         // Avoid used uninitialized warnings.
    770         invoke_type = kDirect;
    771         is_range = false;
    772     }
    773     dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
    774   } else {
    775     invoke_type = kStatic;
    776     dex_file = called->GetDexFile();
    777     dex_method_idx = called->GetDexMethodIndex();
    778   }
    779   uint32_t shorty_len;
    780   const char* shorty =
    781       dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
    782   RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
    783   visitor.VisitArguments();
    784   self->EndAssertNoThreadSuspension(old_cause);
    785   bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
    786   // Resolve method filling in dex cache.
    787   if (UNLIKELY(called->IsRuntimeMethod())) {
    788     StackHandleScope<1> hs(self);
    789     mirror::Object* dummy = nullptr;
    790     HandleWrapper<mirror::Object> h_receiver(
    791         hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
    792     called = linker->ResolveMethod(self, dex_method_idx, &caller, invoke_type);
    793   }
    794   const void* code = NULL;
    795   if (LIKELY(!self->IsExceptionPending())) {
    796     // Incompatible class change should have been handled in resolve method.
    797     CHECK(!called->CheckIncompatibleClassChange(invoke_type))
    798         << PrettyMethod(called) << " " << invoke_type;
    799     if (virtual_or_interface) {
    800       // Refine called method based on receiver.
    801       CHECK(receiver != nullptr) << invoke_type;
    802 
    803       mirror::ArtMethod* orig_called = called;
    804       if (invoke_type == kVirtual) {
    805         called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
    806       } else {
    807         called = receiver->GetClass()->FindVirtualMethodForInterface(called);
    808       }
    809 
    810       CHECK(called != nullptr) << PrettyMethod(orig_called) << " "
    811                                << PrettyTypeOf(receiver) << " "
    812                                << invoke_type << " " << orig_called->GetVtableIndex();
    813 
    814       // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
    815       // of the sharpened method.
    816       if (called->HasSameDexCacheResolvedMethods(caller)) {
    817         caller->SetDexCacheResolvedMethod(called->GetDexMethodIndex(), called);
    818       } else {
    819         // Calling from one dex file to another, need to compute the method index appropriate to
    820         // the caller's dex file. Since we get here only if the original called was a runtime
    821         // method, we've got the correct dex_file and a dex_method_idx from above.
    822         DCHECK_EQ(caller->GetDexFile(), dex_file);
    823         StackHandleScope<1> hs(self);
    824         MethodHelper mh(hs.NewHandle(called));
    825         uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
    826         if (method_index != DexFile::kDexNoIndex) {
    827           caller->SetDexCacheResolvedMethod(method_index, called);
    828         }
    829       }
    830     } else if (invoke_type == kStatic) {
    831       const auto called_dex_method_idx = called->GetDexMethodIndex();
    832       // For static invokes, we may dispatch to the static method in the superclass but resolve
    833       // using the subclass. To prevent getting slow paths on each invoke, we force set the
    834       // resolved method for the super class dex method index if we are in the same dex file.
    835       // b/19175856
    836       if (called->GetDexFile() == dex_file && dex_method_idx != called_dex_method_idx) {
    837         called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called);
    838       }
    839     }
    840     // Ensure that the called method's class is initialized.
    841     StackHandleScope<1> hs(soa.Self());
    842     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
    843     linker->EnsureInitialized(called_class, true, true);
    844     if (LIKELY(called_class->IsInitialized())) {
    845       code = called->GetEntryPointFromQuickCompiledCode();
    846     } else if (called_class->IsInitializing()) {
    847       if (invoke_type == kStatic) {
    848         // Class is still initializing, go to oat and grab code (trampoline must be left in place
    849         // until class is initialized to stop races between threads).
    850         code = linker->GetQuickOatCodeFor(called);
    851       } else {
    852         // No trampoline for non-static methods.
    853         code = called->GetEntryPointFromQuickCompiledCode();
    854       }
    855     } else {
    856       DCHECK(called_class->IsErroneous());
    857     }
    858   }
    859   CHECK_EQ(code == NULL, self->IsExceptionPending());
     860   // Fixup any locally saved objects that may have moved during a GC.
    861   visitor.FixupReferences();
    862   // Place called method in callee-save frame to be placed as first argument to quick method.
    863   sp->Assign(called);
    864   return code;
    865 }
    866 
    867 /*
    868  * This class uses a couple of observations to unite the different calling conventions through
    869  * a few constants.
    870  *
    871  * 1) Number of registers used for passing is normally even, so counting down has no penalty for
    872  *    possible alignment.
    873  * 2) Known 64b architectures store 8B units on the stack, both for integral and floating point
    874  *    types, so using uintptr_t is OK. Also means that we can use kRegistersNeededX to denote
    875  *    when we have to split things
    876  * 3) The only soft-float, Arm, is 32b, so no widening needs to be taken into account for floats
    877  *    and we can use Int handling directly.
    878  * 4) Only 64b architectures widen, and their stack is aligned 8B anyways, so no padding code
    879  *    necessary when widening. Also, widening of Ints will take place implicitly, and the
    880  *    extension should be compatible with Aarch64, which mandates copying the available bits
    881  *    into LSB and leaving the rest unspecified.
    882  * 5) Aligning longs and doubles is necessary on arm only, and it's the same in registers and on
    883  *    the stack.
    884  * 6) There is only little endian.
    885  *
    886  *
    887  * Actual work is supposed to be done in a delegate of the template type. The interface is as
    888  * follows:
    889  *
    890  * void PushGpr(uintptr_t):   Add a value for the next GPR
    891  *
     892  * void PushFpr4(float):      Add a value for the next FPR of size 32b. This is only called if
     893  *                            padding is needed, i.e., when the architecture is 32b but aligns 64b values.
    894  *
     895  * void PushFpr8(uint64_t):   Push a double. We _will_ call this on 32b; it's the callee's job to
     896  *                            split it if necessary. The current state will already be aligned, if
     897  *                            necessary.
    898  *
    899  * void PushStack(uintptr_t): Push a value to the stack.
    900  *
     901  * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ be
     902  *                                          called with nullptr, as this might be important for null
     903  *                                          initialization. Must return the jobject, that is, the
     904  *                                          reference to the entry in the HandleScope (nullptr if necessary).
    905  *
    906  */
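
         // A minimal sketch of a conforming delegate (illustrative only; the names below are
         // hypothetical, and the runtime's own delegates appear later in this file):
         //
         //   class CountingDelegate {
         //    public:
         //     void PushGpr(uintptr_t val) { }
         //     void PushFpr4(float val) { }
         //     void PushFpr8(uint64_t val) { }
         //     void PushStack(uintptr_t val) { }
         //     uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
         //       return 0u;  // A real delegate returns the jobject for the HandleScope entry.
         //     }
         //   };
         //   CountingDelegate delegate;
         //   BuildNativeCallFrameStateMachine<CountingDelegate> sm(&delegate);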
    907 template<class T> class BuildNativeCallFrameStateMachine {
    908  public:
    909 #if defined(__arm__)
    910   // TODO: These are all dummy values!
    911   static constexpr bool kNativeSoftFloatAbi = true;
    912   static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs, r0-r3
    913   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
    914 
    915   static constexpr size_t kRegistersNeededForLong = 2;
    916   static constexpr size_t kRegistersNeededForDouble = 2;
    917   static constexpr bool kMultiRegistersAligned = true;
    918   static constexpr bool kMultiRegistersWidened = false;
    919   static constexpr bool kAlignLongOnStack = true;
    920   static constexpr bool kAlignDoubleOnStack = true;
    921 #elif defined(__aarch64__)
    922   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
     923   static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs, x0-x7.
    924   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
    925 
    926   static constexpr size_t kRegistersNeededForLong = 1;
    927   static constexpr size_t kRegistersNeededForDouble = 1;
    928   static constexpr bool kMultiRegistersAligned = false;
    929   static constexpr bool kMultiRegistersWidened = false;
    930   static constexpr bool kAlignLongOnStack = false;
    931   static constexpr bool kAlignDoubleOnStack = false;
    932 #elif defined(__mips__)
    933   // TODO: These are all dummy values!
     934   static constexpr bool kNativeSoftFloatAbi = true;  // This is a soft float ABI.
     935   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs (dummy value).
     936   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs (dummy value).
    937 
    938   static constexpr size_t kRegistersNeededForLong = 2;
    939   static constexpr size_t kRegistersNeededForDouble = 2;
    940   static constexpr bool kMultiRegistersAligned = true;
    941   static constexpr bool kMultiRegistersWidened = true;
    942   static constexpr bool kAlignLongOnStack = false;
    943   static constexpr bool kAlignDoubleOnStack = false;
    944 #elif defined(__i386__)
    945   // TODO: Check these!
    946   static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
     947   static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
     948   static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
    949 
    950   static constexpr size_t kRegistersNeededForLong = 2;
    951   static constexpr size_t kRegistersNeededForDouble = 2;
    952   static constexpr bool kMultiRegistersAligned = false;  // x86 not using regs, anyways
    953   static constexpr bool kMultiRegistersWidened = false;
    954   static constexpr bool kAlignLongOnStack = false;
    955   static constexpr bool kAlignDoubleOnStack = false;
    956 #elif defined(__x86_64__)
    957   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
    958   static constexpr size_t kNumNativeGprArgs = 6;  // 6 arguments passed in GPRs.
    959   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
    960 
    961   static constexpr size_t kRegistersNeededForLong = 1;
    962   static constexpr size_t kRegistersNeededForDouble = 1;
    963   static constexpr bool kMultiRegistersAligned = false;
    964   static constexpr bool kMultiRegistersWidened = false;
    965   static constexpr bool kAlignLongOnStack = false;
    966   static constexpr bool kAlignDoubleOnStack = false;
    967 #else
    968 #error "Unsupported architecture"
    969 #endif
    970 
    971  public:
    972   explicit BuildNativeCallFrameStateMachine(T* delegate)
    973       : gpr_index_(kNumNativeGprArgs),
    974         fpr_index_(kNumNativeFprArgs),
    975         stack_entries_(0),
    976         delegate_(delegate) {
    977     // For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
    978     // the next register is even; counting down is just to make the compiler happy...
    979     CHECK_EQ(kNumNativeGprArgs % 2, 0U);
    980     CHECK_EQ(kNumNativeFprArgs % 2, 0U);
    981   }
    982 
    983   virtual ~BuildNativeCallFrameStateMachine() {}
    984 
    985   bool HavePointerGpr() {
    986     return gpr_index_ > 0;
    987   }
    988 
    989   void AdvancePointer(const void* val) {
    990     if (HavePointerGpr()) {
    991       gpr_index_--;
    992       PushGpr(reinterpret_cast<uintptr_t>(val));
    993     } else {
    994       stack_entries_++;  // TODO: have a field for pointer length as multiple of 32b
    995       PushStack(reinterpret_cast<uintptr_t>(val));
    996       gpr_index_ = 0;
    997     }
    998   }
    999 
   1000   bool HaveHandleScopeGpr() {
   1001     return gpr_index_ > 0;
   1002   }
   1003 
   1004   void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1005     uintptr_t handle = PushHandle(ptr);
   1006     if (HaveHandleScopeGpr()) {
   1007       gpr_index_--;
   1008       PushGpr(handle);
   1009     } else {
   1010       stack_entries_++;
   1011       PushStack(handle);
   1012       gpr_index_ = 0;
   1013     }
   1014   }
   1015 
   1016   bool HaveIntGpr() {
   1017     return gpr_index_ > 0;
   1018   }
   1019 
   1020   void AdvanceInt(uint32_t val) {
   1021     if (HaveIntGpr()) {
   1022       gpr_index_--;
   1023       PushGpr(val);
   1024     } else {
   1025       stack_entries_++;
   1026       PushStack(val);
   1027       gpr_index_ = 0;
   1028     }
   1029   }
   1030 
   1031   bool HaveLongGpr() {
   1032     return gpr_index_ >= kRegistersNeededForLong + (LongGprNeedsPadding() ? 1 : 0);
   1033   }
   1034 
   1035   bool LongGprNeedsPadding() {
   1036     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1037         kAlignLongOnStack &&                  // and when it needs alignment
   1038         (gpr_index_ & 1) == 1;                // counter is odd, see constructor
   1039   }
   1040 
   1041   bool LongStackNeedsPadding() {
   1042     return kRegistersNeededForLong > 1 &&     // only pad when using multiple registers
   1043         kAlignLongOnStack &&                  // and when it needs 8B alignment
   1044         (stack_entries_ & 1) == 1;            // counter is odd
   1045   }
   1046 
   1047   void AdvanceLong(uint64_t val) {
   1048     if (HaveLongGpr()) {
   1049       if (LongGprNeedsPadding()) {
   1050         PushGpr(0);
   1051         gpr_index_--;
   1052       }
   1053       if (kRegistersNeededForLong == 1) {
   1054         PushGpr(static_cast<uintptr_t>(val));
   1055       } else {
   1056         PushGpr(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1057         PushGpr(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1058       }
   1059       gpr_index_ -= kRegistersNeededForLong;
   1060     } else {
   1061       if (LongStackNeedsPadding()) {
   1062         PushStack(0);
   1063         stack_entries_++;
   1064       }
   1065       if (kRegistersNeededForLong == 1) {
   1066         PushStack(static_cast<uintptr_t>(val));
   1067         stack_entries_++;
   1068       } else {
   1069         PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1070         PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1071         stack_entries_ += 2;
   1072       }
   1073       gpr_index_ = 0;
   1074     }
   1075   }
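           // Worked example for the 32-bit ARM values above (4 argument GPRs, longs need 2 registers,
           // kAlignLongOnStack): after one int argument gpr_index_ is 3 (odd), so LongGprNeedsPadding()
           // is true and a long argument pushes a padding word first, landing the value in an aligned
           // register pair as the AAPCS requires for 64-bit values.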
   1076 
   1077   bool HaveFloatFpr() {
   1078     return fpr_index_ > 0;
   1079   }
   1080 
   1081   void AdvanceFloat(float val) {
   1082     if (kNativeSoftFloatAbi) {
   1083       AdvanceInt(bit_cast<float, uint32_t>(val));
   1084     } else {
   1085       if (HaveFloatFpr()) {
   1086         fpr_index_--;
   1087         if (kRegistersNeededForDouble == 1) {
   1088           if (kMultiRegistersWidened) {
   1089             PushFpr8(bit_cast<double, uint64_t>(val));
   1090           } else {
   1091             // No widening, just use the bits.
   1092             PushFpr8(bit_cast<float, uint64_t>(val));
   1093           }
   1094         } else {
   1095           PushFpr4(val);
   1096         }
   1097       } else {
   1098         stack_entries_++;
   1099         if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
   1100           // Need to widen before storing: Note the "double" in the template instantiation.
   1101           // Note: We need to jump through those hoops to make the compiler happy.
   1102           DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));
   1103           PushStack(static_cast<uintptr_t>(bit_cast<double, uint64_t>(val)));
   1104         } else {
   1105           PushStack(bit_cast<float, uintptr_t>(val));
   1106         }
   1107         fpr_index_ = 0;
   1108       }
   1109     }
   1110   }
   1111 
   1112   bool HaveDoubleFpr() {
   1113     return fpr_index_ >= kRegistersNeededForDouble + (DoubleFprNeedsPadding() ? 1 : 0);
   1114   }
   1115 
   1116   bool DoubleFprNeedsPadding() {
   1117     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1118         kAlignDoubleOnStack &&                  // and when it needs alignment
   1119         (fpr_index_ & 1) == 1;                  // counter is odd, see constructor
   1120   }
   1121 
   1122   bool DoubleStackNeedsPadding() {
   1123     return kRegistersNeededForDouble > 1 &&     // only pad when using multiple registers
   1124         kAlignDoubleOnStack &&                  // and when it needs 8B alignment
   1125         (stack_entries_ & 1) == 1;              // counter is odd
   1126   }
   1127 
   1128   void AdvanceDouble(uint64_t val) {
   1129     if (kNativeSoftFloatAbi) {
   1130       AdvanceLong(val);
   1131     } else {
   1132       if (HaveDoubleFpr()) {
   1133         if (DoubleFprNeedsPadding()) {
   1134           PushFpr4(0);
   1135           fpr_index_--;
   1136         }
   1137         PushFpr8(val);
   1138         fpr_index_ -= kRegistersNeededForDouble;
   1139       } else {
   1140         if (DoubleStackNeedsPadding()) {
   1141           PushStack(0);
   1142           stack_entries_++;
   1143         }
   1144         if (kRegistersNeededForDouble == 1) {
   1145           PushStack(static_cast<uintptr_t>(val));
   1146           stack_entries_++;
   1147         } else {
   1148           PushStack(static_cast<uintptr_t>(val & 0xFFFFFFFF));
   1149           PushStack(static_cast<uintptr_t>((val >> 32) & 0xFFFFFFFF));
   1150           stack_entries_ += 2;
   1151         }
   1152         fpr_index_ = 0;
   1153       }
   1154     }
   1155   }
   1156 
   1157   uint32_t getStackEntries() {
   1158     return stack_entries_;
   1159   }
   1160 
   1161   uint32_t getNumberOfUsedGprs() {
   1162     return kNumNativeGprArgs - gpr_index_;
   1163   }
   1164 
   1165   uint32_t getNumberOfUsedFprs() {
   1166     return kNumNativeFprArgs - fpr_index_;
   1167   }
   1168 
   1169  private:
   1170   void PushGpr(uintptr_t val) {
   1171     delegate_->PushGpr(val);
   1172   }
   1173   void PushFpr4(float val) {
   1174     delegate_->PushFpr4(val);
   1175   }
   1176   void PushFpr8(uint64_t val) {
   1177     delegate_->PushFpr8(val);
   1178   }
   1179   void PushStack(uintptr_t val) {
   1180     delegate_->PushStack(val);
   1181   }
   1182   uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1183     return delegate_->PushHandle(ref);
   1184   }
   1185 
   1186   uint32_t gpr_index_;      // Number of free GPRs
   1187   uint32_t fpr_index_;      // Number of free FPRs
    1188   uint32_t stack_entries_;  // Number of stack argument words used so far (floats are usually
    1189                             // not widened when pushed).
   1190   T* delegate_;             // What Push implementation gets called
   1191 };
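         // Illustrative use of the state machine above, mirroring Walk() below; the delegate type
         // supplies the Push* hooks (ComputeNativeCallFrameSize merely counts):
         //
         //   ComputeNativeCallFrameSize counter;
         //   BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(&counter);
         //   sm.AdvancePointer(nullptr);               // e.g. the JNIEnv*
         //   sm.AdvanceInt(0);                         // an int argument
         //   sm.AdvanceDouble(0);                      // a double argument
         //   uint32_t spilled = sm.getStackEntries();  // words that did not fit in registers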
   1192 
   1193 // Computes the sizes of register stacks and call stack area. Handling of references can be extended
   1194 // in subclasses.
   1195 //
   1196 // To handle native pointers, use "L" in the shorty for an object reference, which simulates
   1197 // them with handles.
   1198 class ComputeNativeCallFrameSize {
   1199  public:
   1200   ComputeNativeCallFrameSize() : num_stack_entries_(0) {}
   1201 
   1202   virtual ~ComputeNativeCallFrameSize() {}
   1203 
   1204   uint32_t GetStackSize() {
   1205     return num_stack_entries_ * sizeof(uintptr_t);
   1206   }
   1207 
   1208   uint8_t* LayoutCallStack(uint8_t* sp8) {
   1209     sp8 -= GetStackSize();
   1210     // Align by kStackAlignment.
   1211     sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1212     return sp8;
   1213   }
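           // Worked example: with five outgoing stack words on a 32-bit target (GetStackSize() == 20)
           // and kStackAlignment == 16, sp8 drops by 20 bytes and is then rounded down to the next
           // 16-byte boundary, so up to 15 further padding bytes may appear below the arguments.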
   1214 
   1215   uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr) {
    1216     // Assumes each FPR argument gets a pointer-sized slot; OK right now, as we have soft-float ARM (no FPR args).
   1217     size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
   1218     sp8 -= fregs * sizeof(uintptr_t);
   1219     *start_fpr = reinterpret_cast<uint32_t*>(sp8);
   1220     size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
   1221     sp8 -= iregs * sizeof(uintptr_t);
   1222     *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
   1223     return sp8;
   1224   }
   1225 
   1226   uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
   1227                             uint32_t** start_fpr) {
   1228     // Native call stack.
   1229     sp8 = LayoutCallStack(sp8);
   1230     *start_stack = reinterpret_cast<uintptr_t*>(sp8);
   1231 
   1232     // Put fprs and gprs below.
   1233     sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
   1234 
   1235     // Return the new bottom.
   1236     return sp8;
   1237   }
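           // Resulting layout, from higher to lower addresses:
           //   | outgoing stack args |  <- *start_stack (aligned to kStackAlignment)
           //   | FPR argument area   |  <- *start_fpr
           //   | GPR argument area   |  <- *start_gpr == returned sp8
           // The register areas are scratch space: they hold the values the generic JNI assembly
           // stub is expected to load into actual registers before the native call.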
   1238 
   1239   virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
   1240       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
   1241 
   1242   void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1243     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
   1244 
   1245     WalkHeader(&sm);
   1246 
   1247     for (uint32_t i = 1; i < shorty_len; ++i) {
   1248       Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
   1249       switch (cur_type_) {
   1250         case Primitive::kPrimNot:
   1251           sm.AdvanceHandleScope(
   1252               reinterpret_cast<mirror::Object*>(0x12345678));
   1253           break;
   1254 
   1255         case Primitive::kPrimBoolean:
   1256         case Primitive::kPrimByte:
   1257         case Primitive::kPrimChar:
   1258         case Primitive::kPrimShort:
   1259         case Primitive::kPrimInt:
   1260           sm.AdvanceInt(0);
   1261           break;
   1262         case Primitive::kPrimFloat:
   1263           sm.AdvanceFloat(0);
   1264           break;
   1265         case Primitive::kPrimDouble:
   1266           sm.AdvanceDouble(0);
   1267           break;
   1268         case Primitive::kPrimLong:
   1269           sm.AdvanceLong(0);
   1270           break;
   1271         default:
   1272           LOG(FATAL) << "Unexpected type: " << cur_type_ << " in " << shorty;
   1273       }
   1274     }
   1275 
   1276     num_stack_entries_ = sm.getStackEntries();
   1277   }
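           // Example: for shorty "DILJ" (double return; int, Object, long arguments) the loop above
           // skips index 0 (the return type) and issues AdvanceInt(0), AdvanceHandleScope(...) and
           // AdvanceLong(0); num_stack_entries_ then holds the argument words that spilled to the
           // stack on this target.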
   1278 
   1279   void PushGpr(uintptr_t /* val */) {
   1280     // not optimizing registers, yet
   1281   }
   1282 
   1283   void PushFpr4(float /* val */) {
   1284     // not optimizing registers, yet
   1285   }
   1286 
   1287   void PushFpr8(uint64_t /* val */) {
   1288     // not optimizing registers, yet
   1289   }
   1290 
   1291   void PushStack(uintptr_t /* val */) {
    1292     // Counting is done by the state machine (see Walk()), so nothing to do here.
   1293   }
   1294 
   1295   virtual uintptr_t PushHandle(mirror::Object* /* ptr */) {
   1296     return reinterpret_cast<uintptr_t>(nullptr);
   1297   }
   1298 
   1299  protected:
   1300   uint32_t num_stack_entries_;
   1301 };
   1302 
   1303 class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
   1304  public:
   1305   ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
   1306 
    1307   // Lays out the callee-save frame. Assumes that the (not yet correct) frame corresponding to
    1308   // RefsAndArgs is at *m = sp. Updates *m to point to the bottom of the re-laid-out frame.
   1309   //
   1310   // Note: assumes ComputeAll() has been run before.
   1311   void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
   1312                              uint32_t* handle_scope_entries)
   1313       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1314     mirror::ArtMethod* method = (*m)->AsMirrorPtr();
   1315 
   1316     uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
   1317 
   1318     // First, fix up the layout of the callee-save frame.
   1319     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1320 
   1321     // "Free" the slot for the method.
   1322     sp8 += kPointerSize;  // In the callee-save frame we use a full pointer.
   1323 
   1324     // Under the callee saves put handle scope and new method stack reference.
   1325     *handle_scope_entries = num_handle_scope_references_;
   1326 
   1327     size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
   1328     size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
   1329 
   1330     sp8 -= scope_and_method;
   1331     // Align by kStackAlignment.
   1332     sp8 = reinterpret_cast<uint8_t*>(RoundDown(
   1333         reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
   1334 
   1335     uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
   1336     *table = reinterpret_cast<HandleScope*>(sp8_table);
   1337     (*table)->SetNumberOfReferences(num_handle_scope_references_);
   1338 
   1339     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
   1340     uint8_t* method_pointer = sp8;
   1341     StackReference<mirror::ArtMethod>* new_method_ref =
   1342         reinterpret_cast<StackReference<mirror::ArtMethod>*>(method_pointer);
   1343     new_method_ref->Assign(method);
   1344     *m = new_method_ref;
   1345   }
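           // Rough sketch of the relayout above (higher addresses on the left):
           //   before:  | callee saves ... | Method*                                  |  <- sp
           //   after:   | callee saves ... | HandleScope | StackReference<ArtMethod>  |  <- *m
           // The pointer-sized slot that held Method* is absorbed into the new area, and *m is
           // updated to the relocated, stack-aligned method reference at the new bottom.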
   1346 
   1347   // Adds space for the cookie. Note: may leave stack unaligned.
   1348   void LayoutCookie(uint8_t** sp) {
   1349     // Reference cookie and padding
   1350     *sp -= 8;
   1351   }
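           // The 8 bytes reserved above hold the 32-bit JNI local reference cookie (stored by
           // artQuickGenericJniTrampoline below via *(sp32 - 1)) plus padding.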
   1352 
   1353   // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
   1354   // Returns the new bottom. Note: this may be unaligned.
   1355   uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
   1356                               uint32_t* handle_scope_entries)
   1357       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1358     // First, fix up the layout of the callee-save frame.
   1359     // We have to squeeze in the HandleScope, and relocate the method pointer.
   1360     LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);
   1361 
   1362     // The bottom of the callee-save frame is now where the method is, *m.
   1363     uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
   1364 
   1365     // Add space for cookie.
   1366     LayoutCookie(&sp8);
   1367 
   1368     return sp8;
   1369   }
   1370 
   1371   // WARNING: After this, *sp won't be pointing to the method anymore!
   1372   uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
   1373                          uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
   1374                          uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
   1375       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1376     Walk(shorty, shorty_len);
   1377 
   1378     // JNI part.
   1379     uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);
   1380 
   1381     sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
   1382 
   1383     // Return the new bottom.
   1384     return sp8;
   1385   }
   1386 
   1387   uintptr_t PushHandle(mirror::Object* /* ptr */) OVERRIDE;
   1388 
   1389   // Add JNIEnv* and jobj/jclass before the shorty-derived elements.
   1390   void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
   1391       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1392 
   1393  private:
   1394   uint32_t num_handle_scope_references_;
   1395 };
   1396 
   1397 uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
   1398   num_handle_scope_references_++;
   1399   return reinterpret_cast<uintptr_t>(nullptr);
   1400 }
   1401 
   1402 void ComputeGenericJniFrameSize::WalkHeader(
   1403     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
   1404   // JNIEnv
   1405   sm->AdvancePointer(nullptr);
   1406 
   1407   // Class object or this as first argument
   1408   sm->AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
   1409 }
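         // WalkHeader above accounts for the two implicit leading arguments of every JNI call:
         // the JNIEnv* and the jclass (static methods) or jobject receiver (instance methods).
         // The 0x12345678 dummy pointer is never dereferenced; it only drives the size and
         // handle-scope accounting.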
   1410 
   1411 // Class to push values to three separate regions. Used to fill the native call part. Adheres to
    1412 // the template requirements of BuildNativeCallFrameStateMachine.
   1413 class FillNativeCall {
   1414  public:
   1415   FillNativeCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) :
   1416       cur_gpr_reg_(gpr_regs), cur_fpr_reg_(fpr_regs), cur_stack_arg_(stack_args) {}
   1417 
   1418   virtual ~FillNativeCall() {}
   1419 
   1420   void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args) {
   1421     cur_gpr_reg_ = gpr_regs;
   1422     cur_fpr_reg_ = fpr_regs;
   1423     cur_stack_arg_ = stack_args;
   1424   }
   1425 
   1426   void PushGpr(uintptr_t val) {
   1427     *cur_gpr_reg_ = val;
   1428     cur_gpr_reg_++;
   1429   }
   1430 
   1431   void PushFpr4(float val) {
   1432     *cur_fpr_reg_ = val;
   1433     cur_fpr_reg_++;
   1434   }
   1435 
   1436   void PushFpr8(uint64_t val) {
   1437     uint64_t* tmp = reinterpret_cast<uint64_t*>(cur_fpr_reg_);
   1438     *tmp = val;
   1439     cur_fpr_reg_ += 2;
   1440   }
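           // cur_fpr_reg_ is addressed in 32-bit units, so a 64-bit value takes two slots (hence
           // the += 2 above), while PushFpr4 advances by a single slot.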
   1441 
   1442   void PushStack(uintptr_t val) {
   1443     *cur_stack_arg_ = val;
   1444     cur_stack_arg_++;
   1445   }
   1446 
   1447   virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1448     LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
   1449     return 0U;
   1450   }
   1451 
   1452  private:
   1453   uintptr_t* cur_gpr_reg_;
   1454   uint32_t* cur_fpr_reg_;
   1455   uintptr_t* cur_stack_arg_;
   1456 };
   1457 
    1458 // Visits arguments on the stack, placing them into a region lower down the stack for the benefit
   1459 // of transitioning into native code.
   1460 class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
   1461  public:
   1462   BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
   1463                               const char* shorty, uint32_t shorty_len, Thread* self)
   1464      : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
   1465        jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
   1466     ComputeGenericJniFrameSize fsc;
   1467     uintptr_t* start_gpr_reg;
   1468     uint32_t* start_fpr_reg;
   1469     uintptr_t* start_stack_arg;
   1470     uint32_t handle_scope_entries;
   1471     bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
   1472                                              &handle_scope_entries, &start_stack_arg,
   1473                                              &start_gpr_reg, &start_fpr_reg);
   1474 
   1475     handle_scope_->SetNumberOfReferences(handle_scope_entries);
   1476     jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
   1477 
    1478     // The JNIEnv* is always the first argument.
   1479     sm_.AdvancePointer(self->GetJniEnv());
   1480 
   1481     if (is_static) {
   1482       sm_.AdvanceHandleScope((*sp)->AsMirrorPtr()->GetDeclaringClass());
   1483     }
   1484   }
   1485 
   1486   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
   1487 
   1488   void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1489 
   1490   StackReference<mirror::Object>* GetFirstHandleScopeEntry()
   1491       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1492     return handle_scope_->GetHandle(0).GetReference();
   1493   }
   1494 
   1495   jobject GetFirstHandleScopeJObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1496     return handle_scope_->GetHandle(0).ToJObject();
   1497   }
   1498 
   1499   void* GetBottomOfUsedArea() {
   1500     return bottom_of_used_area_;
   1501   }
   1502 
   1503  private:
   1504   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
   1505   class FillJniCall FINAL : public FillNativeCall {
   1506    public:
   1507     FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
   1508                 HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
   1509                                              handle_scope_(handle_scope), cur_entry_(0) {}
   1510 
   1511     uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   1512 
   1513     void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
   1514       FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
   1515       handle_scope_ = scope;
   1516       cur_entry_ = 0U;
   1517     }
   1518 
   1519     void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1520       // Initialize padding entries.
   1521       size_t expected_slots = handle_scope_->NumberOfReferences();
   1522       while (cur_entry_ < expected_slots) {
   1523         handle_scope_->GetHandle(cur_entry_++).Assign(nullptr);
   1524       }
   1525       DCHECK_NE(cur_entry_, 0U);
   1526     }
   1527 
   1528    private:
   1529     HandleScope* handle_scope_;
   1530     size_t cur_entry_;
   1531   };
   1532 
   1533   HandleScope* handle_scope_;
   1534   FillJniCall jni_call_;
   1535   void* bottom_of_used_area_;
   1536 
   1537   BuildNativeCallFrameStateMachine<FillJniCall> sm_;
   1538 
   1539   DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
   1540 };
   1541 
   1542 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
   1543   uintptr_t tmp;
   1544   Handle<mirror::Object> h = handle_scope_->GetHandle(cur_entry_);
   1545   h.Assign(ref);
   1546   tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
   1547   cur_entry_++;
   1548   return tmp;
   1549 }
   1550 
   1551 void BuildGenericJniFrameVisitor::Visit() {
   1552   Primitive::Type type = GetParamPrimitiveType();
   1553   switch (type) {
   1554     case Primitive::kPrimLong: {
   1555       jlong long_arg;
   1556       if (IsSplitLongOrDouble()) {
   1557         long_arg = ReadSplitLongParam();
   1558       } else {
   1559         long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
   1560       }
   1561       sm_.AdvanceLong(long_arg);
   1562       break;
   1563     }
   1564     case Primitive::kPrimDouble: {
   1565       uint64_t double_arg;
   1566       if (IsSplitLongOrDouble()) {
    1567         // Read the raw bits so that we don't cast to a double.
   1568         double_arg = ReadSplitLongParam();
   1569       } else {
   1570         double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
   1571       }
   1572       sm_.AdvanceDouble(double_arg);
   1573       break;
   1574     }
   1575     case Primitive::kPrimNot: {
   1576       StackReference<mirror::Object>* stack_ref =
   1577           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
   1578       sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
   1579       break;
   1580     }
   1581     case Primitive::kPrimFloat:
   1582       sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
   1583       break;
   1584     case Primitive::kPrimBoolean:  // Fall-through.
   1585     case Primitive::kPrimByte:     // Fall-through.
   1586     case Primitive::kPrimChar:     // Fall-through.
   1587     case Primitive::kPrimShort:    // Fall-through.
   1588     case Primitive::kPrimInt:      // Fall-through.
   1589       sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
   1590       break;
   1591     case Primitive::kPrimVoid:
   1592       LOG(FATAL) << "UNREACHABLE";
   1593       break;
   1594   }
   1595 }
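         // In Visit() above, each managed argument is forwarded to the state machine according to
         // its shorty type. References are not passed through directly: AdvanceHandleScope stores
         // them in the HandleScope, and the native code receives the resulting jobject (a pointer
         // into the scope) rather than a raw mirror::Object*.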
   1596 
   1597 void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
   1598   // Clear out rest of the scope.
   1599   jni_call_.ResetRemainingScopeSlots();
   1600   // Install HandleScope.
   1601   self->PushHandleScope(handle_scope_);
   1602 }
   1603 
   1604 #if defined(__arm__) || defined(__aarch64__)
   1605 extern "C" void* artFindNativeMethod();
   1606 #else
   1607 extern "C" void* artFindNativeMethod(Thread* self);
   1608 #endif
   1609 
   1610 uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
   1611   if (lock != nullptr) {
   1612     return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
   1613   } else {
   1614     return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
   1615   }
   1616 }
   1617 
   1618 void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
   1619   if (lock != nullptr) {
   1620     JniMethodEndSynchronized(cookie, lock, self);
   1621   } else {
   1622     JniMethodEnd(cookie, self);
   1623   }
   1624 }
   1625 
   1626 /*
    1627  * Initializes an alloca region assumed to be directly below sp for a native call:
    1628  * creates a HandleScope and a call-stack area, and fills mini GPR/FPR stacks with the
    1629  * values that the assembly stub will load into registers.
    1630  *
    1631  * On entry, the stack has a standard callee-save frame above sp and an alloca below it.
    1632  * We need to fix this up, as the handle scope needs to go into the callee-save frame.
    1633  *
    1634  * The return value is a TwoWordReturn: on success it carries the bottom of the used
    1635  * alloca area and the native code pointer to call; on failure (a pending exception) it
    1636  * is the two-word failure value.
   1637  */
   1638 extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
   1639                                                       StackReference<mirror::ArtMethod>* sp)
   1640     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1641   mirror::ArtMethod* called = sp->AsMirrorPtr();
   1642   DCHECK(called->IsNative()) << PrettyMethod(called, true);
   1643   uint32_t shorty_len = 0;
   1644   const char* shorty = called->GetShorty(&shorty_len);
   1645 
   1646   // Run the visitor.
   1647   BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
   1648   visitor.VisitArguments();
   1649   visitor.FinalizeHandleScope(self);
   1650 
   1651   // Fix up managed-stack things in Thread.
   1652   self->SetTopOfStack(sp, 0);
   1653 
   1654   self->VerifyStack();
   1655 
   1656   // Start JNI, save the cookie.
   1657   uint32_t cookie;
   1658   if (called->IsSynchronized()) {
   1659     cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
   1660     if (self->IsExceptionPending()) {
   1661       self->PopHandleScope();
   1662       // A negative value denotes an error.
   1663       return GetTwoWordFailureValue();
   1664     }
   1665   } else {
   1666     cookie = JniMethodStart(self);
   1667   }
   1668   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   1669   *(sp32 - 1) = cookie;
   1670 
   1671   // Retrieve the stored native code.
   1672   void* nativeCode = called->GetEntryPointFromJni();
   1673 
   1674   // There are two cases for the content of nativeCode:
   1675   // 1) Pointer to the native function.
   1676   // 2) Pointer to the trampoline for native code binding.
   1677   // In the second case, we need to execute the binding and continue with the actual native function
   1678   // pointer.
   1679   DCHECK(nativeCode != nullptr);
   1680   if (nativeCode == GetJniDlsymLookupStub()) {
   1681 #if defined(__arm__) || defined(__aarch64__)
   1682     nativeCode = artFindNativeMethod();
   1683 #else
   1684     nativeCode = artFindNativeMethod(self);
   1685 #endif
   1686 
   1687     if (nativeCode == nullptr) {
   1688       DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
   1689 
   1690       // End JNI, as the assembly will move to deliver the exception.
   1691       jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
   1692       if (shorty[0] == 'L') {
   1693         artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
   1694       } else {
   1695         artQuickGenericJniEndJNINonRef(self, cookie, lock);
   1696       }
   1697 
   1698       return GetTwoWordFailureValue();
   1699     }
   1700     // Note that the native code pointer will be automatically set by artFindNativeMethod().
   1701   }
   1702 
   1703   // Return native code addr(lo) and bottom of alloca address(hi).
   1704   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
   1705                                 reinterpret_cast<uintptr_t>(nativeCode));
   1706 }
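         // Sketch of the contract with the per-architecture generic JNI assembly stub: it is
         // expected to adjust sp down to the returned bottom of the used area, load the GPR/FPR
         // values prepared by the visitor into argument registers, and then call the returned
         // native code pointer.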
   1707 
   1708 /*
   1709  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
   1710  * unlocking.
   1711  */
   1712 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
   1713     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1714   StackReference<mirror::ArtMethod>* sp = self->GetManagedStack()->GetTopQuickFrame();
   1715   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   1716   mirror::ArtMethod* called = sp->AsMirrorPtr();
   1717   uint32_t cookie = *(sp32 - 1);
   1718 
   1719   jobject lock = nullptr;
   1720   if (called->IsSynchronized()) {
   1721     HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
   1722         + sizeof(StackReference<mirror::ArtMethod>));
   1723     lock = table->GetHandle(0).ToJObject();
   1724   }
   1725 
   1726   char return_shorty_char = called->GetShorty()[0];
   1727 
   1728   if (return_shorty_char == 'L') {
   1729     return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
   1730   } else {
   1731     artQuickGenericJniEndJNINonRef(self, cookie, lock);
   1732 
   1733     switch (return_shorty_char) {
   1734       case 'F': {
   1735         if (kRuntimeISA == kX86) {
   1736           // Convert back the result to float.
   1737           double d = bit_cast<uint64_t, double>(result_f);
   1738           return bit_cast<float, uint32_t>(static_cast<float>(d));
   1739         } else {
   1740           return result_f;
   1741         }
   1742       }
   1743       case 'D':
   1744         return result_f;
   1745       case 'Z':
   1746         return result.z;
   1747       case 'B':
   1748         return result.b;
   1749       case 'C':
   1750         return result.c;
   1751       case 'S':
   1752         return result.s;
   1753       case 'I':
   1754         return result.i;
   1755       case 'J':
   1756         return result.j;
   1757       case 'V':
   1758         return 0;
   1759       default:
   1760         LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
   1761         return 0;
   1762     }
   1763   }
   1764 }
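         // Note on the packing above: boolean/byte/char/short/int results are simply widened into
         // the 64-bit return value, 'J' and 'D' use all 64 bits, and the x86 'F' case converts the
         // value back from the 64-bit representation in result_f (presumably widened to a double on
         // the return path) to raw 32-bit float bits.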
   1765 
   1766 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
   1767 // for the method pointer.
   1768 //
    1769 // It is valid to use this, as at the usage points here (returns from C functions) we assume we
    1770 // hold the mutator lock (see the SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
   1771 
   1772 template<InvokeType type, bool access_check>
   1773 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
   1774                                      mirror::ArtMethod* caller_method,
   1775                                      Thread* self, StackReference<mirror::ArtMethod>* sp);
   1776 
   1777 template<InvokeType type, bool access_check>
   1778 static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
   1779                                      mirror::ArtMethod* caller_method,
   1780                                      Thread* self, StackReference<mirror::ArtMethod>* sp) {
   1781   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
   1782                                              type);
   1783   if (UNLIKELY(method == nullptr)) {
   1784     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1785     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
   1786     uint32_t shorty_len;
   1787     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
   1788     {
   1789       // Remember the args in case a GC happens in FindMethodFromCode.
   1790       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   1791       RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
   1792       visitor.VisitArguments();
   1793       method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
   1794                                                       self);
   1795       visitor.FixupReferences();
   1796     }
   1797 
    1798     if (UNLIKELY(method == nullptr)) {
   1799       CHECK(self->IsExceptionPending());
   1800       return GetTwoWordFailureValue();  // Failure.
   1801     }
   1802   }
   1803   DCHECK(!self->IsExceptionPending());
   1804   const void* code = method->GetEntryPointFromQuickCompiledCode();
   1805 
   1806   // When we return, the caller will branch to this address, so it had better not be 0!
   1807   DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
   1808                           << " location: "
   1809                           << method->GetDexFile()->GetLocation();
   1810 
   1811   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   1812                                 reinterpret_cast<uintptr_t>(method));
   1813 }
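         // The success value above packs hi = code and lo = method (see the TwoWordReturn comment
         // above), so the assembly invoke stub can place the resolved ArtMethod* in the expected
         // register and branch to the method's quick code in one step.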
   1814 
   1815 // Explicit artInvokeCommon template function declarations to please analysis tool.
   1816 #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check)                                \
   1817   template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                          \
   1818   TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx,                        \
   1819                                                     mirror::Object* this_object,                \
   1820                                                     mirror::ArtMethod* caller_method,           \
   1821                                                     Thread* self,                               \
   1822                                                     StackReference<mirror::ArtMethod>* sp)      \
   1823 
   1824 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
   1825 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
   1826 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
   1827 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
   1828 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
   1829 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
   1830 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
   1831 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
   1832 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
   1833 EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
   1834 #undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
   1835 
   1836 // See comments in runtime_support_asm.S
   1837 extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
   1838     uint32_t method_idx, mirror::Object* this_object,
   1839     mirror::ArtMethod* caller_method, Thread* self,
   1840     StackReference<mirror::ArtMethod>* sp)
   1841         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1842   return artInvokeCommon<kInterface, true>(method_idx, this_object,
   1843                                            caller_method, self, sp);
   1844 }
   1845 
   1846 extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
   1847     uint32_t method_idx, mirror::Object* this_object,
   1848     mirror::ArtMethod* caller_method, Thread* self,
   1849     StackReference<mirror::ArtMethod>* sp)
   1850         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1851   return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
   1852                                         self, sp);
   1853 }
   1854 
   1855 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
   1856     uint32_t method_idx, mirror::Object* this_object,
   1857     mirror::ArtMethod* caller_method, Thread* self,
   1858     StackReference<mirror::ArtMethod>* sp)
   1859         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1860   return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
   1861                                         self, sp);
   1862 }
   1863 
   1864 extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
   1865     uint32_t method_idx, mirror::Object* this_object,
   1866     mirror::ArtMethod* caller_method, Thread* self,
   1867     StackReference<mirror::ArtMethod>* sp)
   1868         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1869   return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
   1870                                        self, sp);
   1871 }
   1872 
   1873 extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
   1874     uint32_t method_idx, mirror::Object* this_object,
   1875     mirror::ArtMethod* caller_method, Thread* self,
   1876     StackReference<mirror::ArtMethod>* sp)
   1877         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1878   return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
   1879                                          self, sp);
   1880 }
   1881 
    1882 // Determine the target of an interface dispatch. The 'this' object is known to be non-null.
   1883 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
   1884                                                       mirror::Object* this_object,
   1885                                                       mirror::ArtMethod* caller_method,
   1886                                                       Thread* self,
   1887                                                       StackReference<mirror::ArtMethod>* sp)
   1888     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   1889   mirror::ArtMethod* method;
   1890   if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
   1891     method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
    1892     if (UNLIKELY(method == nullptr)) {
   1893       FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1894       ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
   1895                                                                  caller_method);
   1896       return GetTwoWordFailureValue();  // Failure.
   1897     }
   1898   } else {
   1899     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   1900     DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
   1901 
   1902     // Find the caller PC.
   1903     constexpr size_t pc_offset = GetCalleeSavePCOffset(kRuntimeISA, Runtime::kRefsAndArgs);
   1904     uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + pc_offset);
   1905 
   1906     // Map the caller PC to a dex PC.
   1907     uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
   1908     const DexFile::CodeItem* code = caller_method->GetCodeItem();
   1909     CHECK_LT(dex_pc, code->insns_size_in_code_units_);
   1910     const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
   1911     Instruction::Code instr_code = instr->Opcode();
   1912     CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
   1913           instr_code == Instruction::INVOKE_INTERFACE_RANGE)
   1914         << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
   1915     uint32_t dex_method_idx;
   1916     if (instr_code == Instruction::INVOKE_INTERFACE) {
   1917       dex_method_idx = instr->VRegB_35c();
   1918     } else {
   1919       DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
   1920       dex_method_idx = instr->VRegB_3rc();
   1921     }
   1922 
   1923     const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
   1924         ->GetDexFile();
   1925     uint32_t shorty_len;
   1926     const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx),
   1927                                                    &shorty_len);
   1928     {
   1929       // Remember the args in case a GC happens in FindMethodFromCode.
   1930       ScopedObjectAccessUnchecked soa(self->GetJniEnv());
   1931       RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
   1932       visitor.VisitArguments();
   1933       method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method,
   1934                                                      self);
   1935       visitor.FixupReferences();
   1936     }
   1937 
   1938     if (UNLIKELY(method == nullptr)) {
   1939       CHECK(self->IsExceptionPending());
   1940       return GetTwoWordFailureValue();  // Failure.
   1941     }
   1942   }
   1943   const void* code = method->GetEntryPointFromQuickCompiledCode();
   1944 
   1945   // When we return, the caller will branch to this address, so it had better not be 0!
   1946   DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method)
   1947                           << " location: " << method->GetDexFile()->GetLocation();
   1948 
   1949   return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(code),
   1950                                 reinterpret_cast<uintptr_t>(method));
   1951 }
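         // Note on the slow path above: when dispatch went through the runtime's resolution method,
         // interface_method carries no usable dex method index, so the real target is recovered by
         // mapping the caller's return PC back to a dex PC and decoding the invoke-interface
         // instruction found there (VRegB_35c or VRegB_3rc holds the method index) before resolving
         // it as usual.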
   1952 
   1953 }  // namespace art
   1954