/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "mterp.h"

#include "base/quasi_atomic.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed-goto instruction transitions, make sure
   * none of the handlers overflows the byte limit.  This won't tell us
   * which one did, but if any single handler is too big, the total size
   * will overflow.
   */
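  // Handlers are laid out at fixed kMterpHandlerSize intervals, so computed-goto
  // dispatch can, in effect (a sketch, not literal code from this file), do:
  //   handler = mterp_current_ibase + opcode * kMterpHandlerSize;
  // which is why no single handler may exceed kMterpHandlerSize bytes.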
  const int width = kMterpHandlerSize;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
                 << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match: that is the size of the sparse-switch
 * instruction in code units, so returning it makes execution fall through to
 * the instruction that follows the switch.
 */
     61 extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
     62   const int kInstrLen = 3;
     63   uint16_t size;
     64   const int32_t* keys;
     65   const int32_t* entries;
     66 
     67   /*
     68    * Sparse switch data format:
     69    *  ushort ident = 0x0200   magic value
     70    *  ushort size             number of entries in the table; > 0
     71    *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
     72    *  int targets[size]       branch targets, relative to switch opcode
     73    *
     74    * Total size is (2+size*4) 16-bit code units.
     75    */
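  /*
   * For illustration only (not from the original source), a sparse switch over
   * the keys {3, 101} could be laid out in 16-bit code units as:
   *   0x0200, 0x0002,          // ident, size = 2
   *   0x0003, 0x0000,          // keys[0] = 3   (low half, high half)
   *   0x0065, 0x0000,          // keys[1] = 101
   *   0x000a, 0x0000,          // targets[0] = +10 code units
   *   0x0014, 0x0000           // targets[1] = +20 code units
   * for a total of 2 + 2*4 = 10 code units.
   */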

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

    113 extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
    114   const int kInstrLen = 3;
    115 
    116   /*
    117    * Packed switch data format:
    118    *  ushort ident = 0x0100   magic value
    119    *  ushort size             number of entries in the table
    120    *  int first_key           first (and lowest) switch case value
    121    *  int targets[size]       branch targets, relative to switch opcode
    122    *
    123    * Total size is (4+size*2) 16-bit code units.
    124    */
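  /*
   * For illustration only (not from the original source), a packed switch
   * covering cases 10 and 11 could be laid out in 16-bit code units as:
   *   0x0100, 0x0002,          // ident, size = 2
   *   0x000a, 0x0000,          // first_key = 10 (low half, high half)
   *   0x000a, 0x0000,          // targets[0] = +10 code units
   *   0x0014, 0x0000           // targets[1] = +20 code units
   * for a total of 4 + 2*2 = 8 code units.
   */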
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

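  // first_key is a 32-bit value stored as two 16-bit code units, low half
  // first; reassemble it before computing the table index.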
  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

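// Returns true only when every precondition for running the fast mterp
// interpreter holds; any debugger, instrumentation, or non-standard-exit
// feature forces the switch interpreter instead.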
bool CanUseMterp()
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Runtime* const runtime = Runtime::Current();
  return
      runtime->IsStarted() &&
      !runtime->IsAotCompiler() &&
      !Dbg::IsDebuggerActive() &&
      !runtime->GetInstrumentation()->IsActive() &&
      // Mterp only knows how to deal with the normal exits. It cannot handle any of the
      // non-standard force-returns.
      !runtime->AreNonStandardExitsEnabled() &&
      // If an async exception has been thrown, we need to go to the switch interpreter.
      // Mterp doesn't know how to deal with these, so we could end up never handling the
      // exception if we are stuck in an infinite loop.
      !runtime->AreAsyncExceptionsThrown() &&
      (runtime->GetJit() == nullptr || !runtime->GetJit()->JitAtFirstUse());
}

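// Each MterpInvoke* helper below decodes the invoke instruction at the current
// dex pc and forwards to the shared DoInvoke/DoInvokeCustom/DoInvokePolymorphic
// templates, returning false (0) if the invoke threw an exception.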
    165 extern "C" size_t MterpInvokeVirtual(Thread* self,
    166                                      ShadowFrame* shadow_frame,
    167                                      uint16_t* dex_pc_ptr,
    168                                      uint16_t inst_data)
    169     REQUIRES_SHARED(Locks::mutator_lock_) {
    170   JValue* result_register = shadow_frame->GetResultRegister();
    171   const Instruction* inst = Instruction::At(dex_pc_ptr);
    172   return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    173       self, *shadow_frame, inst, inst_data, result_register);
    174 }
    175 
    176 extern "C" size_t MterpInvokeSuper(Thread* self,
    177                                    ShadowFrame* shadow_frame,
    178                                    uint16_t* dex_pc_ptr,
    179                                    uint16_t inst_data)
    180     REQUIRES_SHARED(Locks::mutator_lock_) {
    181   JValue* result_register = shadow_frame->GetResultRegister();
    182   const Instruction* inst = Instruction::At(dex_pc_ptr);
    183   return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    184       self, *shadow_frame, inst, inst_data, result_register);
    185 }
    186 
    187 extern "C" size_t MterpInvokeInterface(Thread* self,
    188                                        ShadowFrame* shadow_frame,
    189                                        uint16_t* dex_pc_ptr,
    190                                        uint16_t inst_data)
    191     REQUIRES_SHARED(Locks::mutator_lock_) {
    192   JValue* result_register = shadow_frame->GetResultRegister();
    193   const Instruction* inst = Instruction::At(dex_pc_ptr);
    194   return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    195       self, *shadow_frame, inst, inst_data, result_register);
    196 }
    197 
    198 extern "C" size_t MterpInvokeDirect(Thread* self,
    199                                     ShadowFrame* shadow_frame,
    200                                     uint16_t* dex_pc_ptr,
    201                                     uint16_t inst_data)
    202     REQUIRES_SHARED(Locks::mutator_lock_) {
    203   JValue* result_register = shadow_frame->GetResultRegister();
    204   const Instruction* inst = Instruction::At(dex_pc_ptr);
    205   return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    206       self, *shadow_frame, inst, inst_data, result_register);
    207 }
    208 
    209 extern "C" size_t MterpInvokeStatic(Thread* self,
    210                                     ShadowFrame* shadow_frame,
    211                                     uint16_t* dex_pc_ptr,
    212                                     uint16_t inst_data)
    213     REQUIRES_SHARED(Locks::mutator_lock_) {
    214   JValue* result_register = shadow_frame->GetResultRegister();
    215   const Instruction* inst = Instruction::At(dex_pc_ptr);
    216   return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    217       self, *shadow_frame, inst, inst_data, result_register);
    218 }
    219 
    220 extern "C" size_t MterpInvokeCustom(Thread* self,
    221                                     ShadowFrame* shadow_frame,
    222                                     uint16_t* dex_pc_ptr,
    223                                     uint16_t inst_data)
    224     REQUIRES_SHARED(Locks::mutator_lock_) {
    225   JValue* result_register = shadow_frame->GetResultRegister();
    226   const Instruction* inst = Instruction::At(dex_pc_ptr);
    227   return DoInvokeCustom</* is_range= */ false>(
    228       self, *shadow_frame, inst, inst_data, result_register);
    229 }
    230 
    231 extern "C" size_t MterpInvokePolymorphic(Thread* self,
    232                                          ShadowFrame* shadow_frame,
    233                                          uint16_t* dex_pc_ptr,
    234                                          uint16_t inst_data)
    235     REQUIRES_SHARED(Locks::mutator_lock_) {
    236   JValue* result_register = shadow_frame->GetResultRegister();
    237   const Instruction* inst = Instruction::At(dex_pc_ptr);
    238   return DoInvokePolymorphic</* is_range= */ false>(
    239       self, *shadow_frame, inst, inst_data, result_register);
    240 }
    241 
    242 extern "C" size_t MterpInvokeVirtualRange(Thread* self,
    243                                           ShadowFrame* shadow_frame,
    244                                           uint16_t* dex_pc_ptr,
    245                                           uint16_t inst_data)
    246     REQUIRES_SHARED(Locks::mutator_lock_) {
    247   JValue* result_register = shadow_frame->GetResultRegister();
    248   const Instruction* inst = Instruction::At(dex_pc_ptr);
    249   return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    250       self, *shadow_frame, inst, inst_data, result_register);
    251 }
    252 
    253 extern "C" size_t MterpInvokeSuperRange(Thread* self,
    254                                         ShadowFrame* shadow_frame,
    255                                         uint16_t* dex_pc_ptr,
    256                                         uint16_t inst_data)
    257     REQUIRES_SHARED(Locks::mutator_lock_) {
    258   JValue* result_register = shadow_frame->GetResultRegister();
    259   const Instruction* inst = Instruction::At(dex_pc_ptr);
    260   return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    261       self, *shadow_frame, inst, inst_data, result_register);
    262 }
    263 
    264 extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
    265                                             ShadowFrame* shadow_frame,
    266                                             uint16_t* dex_pc_ptr,
    267                                             uint16_t inst_data)
    268     REQUIRES_SHARED(Locks::mutator_lock_) {
    269   JValue* result_register = shadow_frame->GetResultRegister();
    270   const Instruction* inst = Instruction::At(dex_pc_ptr);
    271   return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    272       self, *shadow_frame, inst, inst_data, result_register);
    273 }
    274 
    275 extern "C" size_t MterpInvokeDirectRange(Thread* self,
    276                                          ShadowFrame* shadow_frame,
    277                                          uint16_t* dex_pc_ptr,
    278                                          uint16_t inst_data)
    279     REQUIRES_SHARED(Locks::mutator_lock_) {
    280   JValue* result_register = shadow_frame->GetResultRegister();
    281   const Instruction* inst = Instruction::At(dex_pc_ptr);
    282   return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    283       self, *shadow_frame, inst, inst_data, result_register);
    284 }
    285 
    286 extern "C" size_t MterpInvokeStaticRange(Thread* self,
    287                                          ShadowFrame* shadow_frame,
    288                                          uint16_t* dex_pc_ptr,
    289                                          uint16_t inst_data)
    290     REQUIRES_SHARED(Locks::mutator_lock_) {
    291   JValue* result_register = shadow_frame->GetResultRegister();
    292   const Instruction* inst = Instruction::At(dex_pc_ptr);
    293   return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
    294       self, *shadow_frame, inst, inst_data, result_register);
    295 }
    296 
    297 extern "C" size_t MterpInvokeCustomRange(Thread* self,
    298                                          ShadowFrame* shadow_frame,
    299                                          uint16_t* dex_pc_ptr,
    300                                          uint16_t inst_data)
    301     REQUIRES_SHARED(Locks::mutator_lock_) {
    302   JValue* result_register = shadow_frame->GetResultRegister();
    303   const Instruction* inst = Instruction::At(dex_pc_ptr);
    304   return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
    305 }
    306 
    307 extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
    308                                               ShadowFrame* shadow_frame,
    309                                               uint16_t* dex_pc_ptr,
    310                                               uint16_t inst_data)
    311     REQUIRES_SHARED(Locks::mutator_lock_) {
    312   JValue* result_register = shadow_frame->GetResultRegister();
    313   const Instruction* inst = Instruction::At(dex_pc_ptr);
    314   return DoInvokePolymorphic</* is_range= */ true>(
    315       self, *shadow_frame, inst, inst_data, result_register);
    316 }
    317 
    318 extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
    319                                           ShadowFrame* shadow_frame,
    320                                           uint16_t* dex_pc_ptr,
    321                                           uint16_t inst_data)
    322     REQUIRES_SHARED(Locks::mutator_lock_) {
    323   JValue* result_register = shadow_frame->GetResultRegister();
    324   const Instruction* inst = Instruction::At(dex_pc_ptr);
    325   return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
    326       /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
    327 }
    328 
    329 extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
    330                                                ShadowFrame* shadow_frame,
    331                                                uint16_t* dex_pc_ptr,
    332                                                uint16_t inst_data)
    333     REQUIRES_SHARED(Locks::mutator_lock_) {
    334   JValue* result_register = shadow_frame->GetResultRegister();
    335   const Instruction* inst = Instruction::At(dex_pc_ptr);
    336   return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
    337       /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
    338 }
    339 
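// A constructor fence: publishes a newly constructed object's fields before
// the object reference itself can be observed by another thread (a store/store
// barrier on architectures that need one).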
    340 extern "C" void MterpThreadFenceForConstructor() {
    341   QuasiAtomic::ThreadFenceForConstructor();
    342 }
    343 
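// Each MterpConst* helper below returns true if resolution failed and an
// exception is now pending, or false on success after storing the resolved
// value into the target vreg.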
    344 extern "C" size_t MterpConstString(uint32_t index,
    345                                    uint32_t tgt_vreg,
    346                                    ShadowFrame* shadow_frame,
    347                                    Thread* self)
    348     REQUIRES_SHARED(Locks::mutator_lock_) {
    349   ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
    350   if (UNLIKELY(s == nullptr)) {
    351     return true;
    352   }
    353   shadow_frame->SetVRegReference(tgt_vreg, s);
    354   return false;
    355 }
    356 
    357 extern "C" size_t MterpConstClass(uint32_t index,
    358                                   uint32_t tgt_vreg,
    359                                   ShadowFrame* shadow_frame,
    360                                   Thread* self)
    361     REQUIRES_SHARED(Locks::mutator_lock_) {
    362   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
    363                                                    shadow_frame->GetMethod(),
    364                                                    self,
    365                                                    /* can_run_clinit= */ false,
    366                                                    /* verify_access= */ false);
    367   if (UNLIKELY(c == nullptr)) {
    368     return true;
    369   }
    370   shadow_frame->SetVRegReference(tgt_vreg, c);
    371   return false;
    372 }
    373 
    374 extern "C" size_t MterpConstMethodHandle(uint32_t index,
    375                                          uint32_t tgt_vreg,
    376                                          ShadowFrame* shadow_frame,
    377                                          Thread* self)
    378     REQUIRES_SHARED(Locks::mutator_lock_) {
    379   ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
    380   if (UNLIKELY(mh == nullptr)) {
    381     return true;
    382   }
    383   shadow_frame->SetVRegReference(tgt_vreg, mh);
    384   return false;
    385 }
    386 
    387 extern "C" size_t MterpConstMethodType(uint32_t index,
    388                                        uint32_t tgt_vreg,
    389                                        ShadowFrame* shadow_frame,
    390                                        Thread* self)
    391     REQUIRES_SHARED(Locks::mutator_lock_) {
    392   ObjPtr<mirror::MethodType> mt =
    393       ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
    394   if (UNLIKELY(mt == nullptr)) {
    395     return true;
    396   }
    397   shadow_frame->SetVRegReference(tgt_vreg, mt);
    398   return false;
    399 }
    400 
    401 extern "C" size_t MterpCheckCast(uint32_t index,
    402                                  StackReference<mirror::Object>* vreg_addr,
    403                                  art::ArtMethod* method,
    404                                  Thread* self)
    405     REQUIRES_SHARED(Locks::mutator_lock_) {
    406   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
    407                                                    method,
    408                                                    self,
    409                                                    false,
    410                                                    false);
    411   if (UNLIKELY(c == nullptr)) {
    412     return true;
    413   }
    414   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
    415   ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
    416   if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    417     ThrowClassCastException(c, obj->GetClass());
    418     return true;
    419   }
    420   return false;
    421 }
    422 
    423 extern "C" size_t MterpInstanceOf(uint32_t index,
    424                                   StackReference<mirror::Object>* vreg_addr,
    425                                   art::ArtMethod* method,
    426                                   Thread* self)
    427     REQUIRES_SHARED(Locks::mutator_lock_) {
    428   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index),
    429                                                    method,
    430                                                    self,
    431                                                    false,
    432                                                    false);
    433   if (UNLIKELY(c == nullptr)) {
    434     return false;  // Caller will check for pending exception.  Return value unimportant.
    435   }
    436   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
    437   ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
    438   return (obj != nullptr) && obj->InstanceOf(c);
    439 }
    440 
    441 extern "C" size_t MterpFillArrayData(mirror::Object* obj,
    442                                      const Instruction::ArrayDataPayload* payload)
    443     REQUIRES_SHARED(Locks::mutator_lock_) {
    444   return FillArrayData(obj, payload);
    445 }
    446 
    447 extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    448     REQUIRES_SHARED(Locks::mutator_lock_) {
    449   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    450   ObjPtr<mirror::Object> obj = nullptr;
    451   ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
    452                                                    shadow_frame->GetMethod(),
    453                                                    self,
    454                                                    /* can_run_clinit= */ false,
    455                                                    /* verify_access= */ false);
    456   if (LIKELY(c != nullptr)) {
    457     if (UNLIKELY(c->IsStringClass())) {
    458       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    459       obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
    460     } else {
    461       obj = AllocObjectFromCode<true>(c,
    462                                       self,
    463                                       Runtime::Current()->GetHeap()->GetCurrentAllocator());
    464     }
    465   }
    466   if (UNLIKELY(obj == nullptr)) {
    467     return false;
    468   }
    469   obj->GetClass()->AssertInitializedOrInitializingInThread(self);
    470   shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
    471   return true;
    472 }
    473 
    474 extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
    475                                        uint16_t* dex_pc_ptr,
    476                                        uint32_t inst_data)
    477     REQUIRES_SHARED(Locks::mutator_lock_) {
    478   const Instruction* inst = Instruction::At(dex_pc_ptr);
    479   return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
    480 }
    481 
    482 extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
    483                                   uint16_t* dex_pc_ptr,
    484                                   uint32_t inst_data)
    485     REQUIRES_SHARED(Locks::mutator_lock_) {
    486   const Instruction* inst = Instruction::At(dex_pc_ptr);
    487   ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
    488   if (UNLIKELY(a == nullptr)) {
    489     return false;
    490   }
    491   int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
    492   ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
    493   ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
    494   if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    495     array->SetWithoutChecks<false>(index, val);
    496     return true;
    497   }
    498   return false;
    499 }
    500 
    501 extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
    502                                       uint16_t* dex_pc_ptr,
    503                                       Thread* self)
    504     REQUIRES_SHARED(Locks::mutator_lock_) {
    505   const Instruction* inst = Instruction::At(dex_pc_ptr);
    506   return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
    507                                                shadow_frame->GetResultRegister());
    508 }
    509 
    510 extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
    511                                            uint16_t* dex_pc_ptr,
    512                                            Thread* self)
    513     REQUIRES_SHARED(Locks::mutator_lock_) {
    514   const Instruction* inst = Instruction::At(dex_pc_ptr);
    515   return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
    516                                               shadow_frame->GetResultRegister());
    517 }
    518 
    519 extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
    520                                 uint16_t* dex_pc_ptr,
    521                                 uint32_t inst_data, Thread* self)
    522     REQUIRES_SHARED(Locks::mutator_lock_) {
    523   const Instruction* inst = Instruction::At(dex_pc_ptr);
    524   int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
    525   ObjPtr<mirror::Object> obj = AllocArrayFromCode<false, true>(
    526       dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
    527       Runtime::Current()->GetHeap()->GetCurrentAllocator());
    528   if (UNLIKELY(obj == nullptr)) {
    529       return false;
    530   }
    531   shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
    532   return true;
    533 }
    534 
    535 extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    536     REQUIRES_SHARED(Locks::mutator_lock_) {
    537   DCHECK(self->IsExceptionPending());
    538   const instrumentation::Instrumentation* const instrumentation =
    539       Runtime::Current()->GetInstrumentation();
    540   return MoveToExceptionHandler(self, *shadow_frame, instrumentation);
    541 }
    542 
struct MterpCheckHelper {
  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
};
DEFINE_RUNTIME_DEBUG_FLAG(MterpCheckHelper, kSlowMode);

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Check that we are using the right interpreter.
  if (kIsDebugBuild && self->UseMterp() != CanUseMterp()) {
    // The flag might currently be in the process of being updated on all threads.
    // Retry with the lock held.
    MutexLock tll_mu(self, *Locks::thread_list_lock_);
    DCHECK_EQ(self->UseMterp(), CanUseMterp());
  }
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  if (kTraceExecutionEnabled) {
    uint32_t dex_pc = dex_pc_ptr - shadow_frame->GetDexInstructions();
    TraceExecution(*shadow_frame, inst, dex_pc);
  }
  if (kTestExportPC) {
    // Save an invalid dex pc to force a segfault if it is improperly used.
    shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(kExportPCPoison));
  }
  if (MterpCheckHelper::kSlowMode) {
    shadow_frame->CheckConsistentVRegs();
  }
}

    577 extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    578     REQUIRES_SHARED(Locks::mutator_lock_) {
    579   UNUSED(self);
    580   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    581   uint16_t inst_data = inst->Fetch16(0);
    582   LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
    583 }
    584 
    585 extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    586     REQUIRES_SHARED(Locks::mutator_lock_) {
    587   UNUSED(self);
    588   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    589   uint16_t inst_data = inst->Fetch16(0);
    590   LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
    591 }
    592 
    593 extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    594     REQUIRES_SHARED(Locks::mutator_lock_) {
    595   UNUSED(self);
    596   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    597   uint16_t inst_data = inst->Fetch16(0);
    598   LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
    599 }
    600 
    601 extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    602     REQUIRES_SHARED(Locks::mutator_lock_) {
    603   UNUSED(self);
    604   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    605   uint16_t inst_data = inst->Fetch16(0);
    606   LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
    607 }
    608 
    609 extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    610     REQUIRES_SHARED(Locks::mutator_lock_) {
    611   UNUSED(self);
    612   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    613   uint16_t inst_data = inst->Fetch16(0);
    614   LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
    615 }
    616 
    617 extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    618     REQUIRES_SHARED(Locks::mutator_lock_) {
    619   UNUSED(self);
    620   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    621   uint16_t inst_data = inst->Fetch16(0);
    622   LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
    623 }
    624 
    625 extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    626     REQUIRES_SHARED(Locks::mutator_lock_) {
    627   UNUSED(self);
    628   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    629   uint16_t inst_data = inst->Fetch16(0);
    630   LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Suspend Pending?: "
    631             << self->IsExceptionPending();
    632 }
    633 
    634 extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    635     REQUIRES_SHARED(Locks::mutator_lock_) {
    636   UNUSED(self);
    637   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    638   uint16_t inst_data = inst->Fetch16(0);
    639   LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
    640 }
    641 
    642 extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    643     REQUIRES_SHARED(Locks::mutator_lock_) {
    644   UNUSED(self);
    645   const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
    646   uint16_t inst_data = inst->Fetch16(0);
    647   if (flags & kCheckpointRequest) {
    648     LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
    649   } else if (flags & kSuspendRequest) {
    650     LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
    651   } else if (flags & kEmptyCheckpointRequest) {
    652     LOG(INFO) << "Empty checkpoint fallback: " << inst->Opcode(inst_data);
    653   }
    654 }
    655 
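// Perform a suspend check; returns true if mterp must be exited because this
// thread may no longer use it after the suspension point.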
    656 extern "C" size_t MterpSuspendCheck(Thread* self)
    657     REQUIRES_SHARED(Locks::mutator_lock_) {
    658   self->AllowThreadSuspension();
    659   return !self->UseMterp();
    660 }
    661 
// Execute a single field-access instruction (get/put, static/instance).
// The template arguments reduce this to a fairly small amount of code.
// It requires the target object and field to already be resolved.
    665 template<typename PrimType, FindFieldType kAccessType>
    666 ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
    667                                     uint16_t inst_data,
    668                                     ShadowFrame* shadow_frame,
    669                                     ObjPtr<mirror::Object> obj,
    670                                     MemberOffset offset,
    671                                     bool is_volatile)
    672     REQUIRES_SHARED(Locks::mutator_lock_) {
    673   static_assert(std::is_integral<PrimType>::value, "Unexpected primitive type");
    674   constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
    675   constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
    676   constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
    677 
    678   uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
    679   if (kIsPrimitive) {
    680     if (kIsRead) {
    681       PrimType value = UNLIKELY(is_volatile)
    682           ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset)
    683           : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset);
    684       if (sizeof(PrimType) == sizeof(uint64_t)) {
    685         shadow_frame->SetVRegLong(vRegA, value);  // Set two consecutive registers.
    686       } else {
    687         shadow_frame->SetVReg(vRegA, static_cast<int32_t>(value));  // Sign/zero extend.
    688       }
    689     } else {  // Write.
    690       uint64_t value = (sizeof(PrimType) == sizeof(uint64_t))
    691           ? shadow_frame->GetVRegLong(vRegA)
    692           : shadow_frame->GetVReg(vRegA);
    693       if (UNLIKELY(is_volatile)) {
    694         obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value);
    695       } else {
    696         obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value);
    697       }
    698     }
    699   } else {  // Object.
    700     if (kIsRead) {
    701       ObjPtr<mirror::Object> value = UNLIKELY(is_volatile)
    702           ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
    703           : obj->GetFieldObject<mirror::Object>(offset);
    704       shadow_frame->SetVRegReference(vRegA, value);
    705     } else {  // Write.
    706       ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
    707       if (UNLIKELY(is_volatile)) {
    708         obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value);
    709       } else {
    710         obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value);
    711       }
    712     }
    713   }
    714 }
    715 
    716 template<typename PrimType, FindFieldType kAccessType>
    717 NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
    718                                     uint16_t inst_data,
    719                                     ShadowFrame* shadow_frame,
    720                                     Thread* self)
    721     REQUIRES_SHARED(Locks::mutator_lock_) {
    722   constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
    723   constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
    724 
    725   // Update the dex pc in shadow frame, just in case anything throws.
    726   shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
    727   ArtMethod* referrer = shadow_frame->GetMethod();
    728   uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
    729   ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
    730       field_idx, referrer, self, sizeof(PrimType));
    731   if (UNLIKELY(field == nullptr)) {
    732     DCHECK(self->IsExceptionPending());
    733     return false;
    734   }
    735   ObjPtr<mirror::Object> obj = kIsStatic
    736       ? field->GetDeclaringClass().Ptr()
    737       : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
    738   if (UNLIKELY(obj == nullptr)) {
    739     ThrowNullPointerExceptionForFieldAccess(field, kIsRead);
    740     return false;
    741   }
    742   MterpFieldAccess<PrimType, kAccessType>(
    743       inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
    744   return true;
    745 }
    746 
// This method is called from assembly to handle field-access instructions.
//
// This method is fairly hot.  It is long, but it has been carefully optimized.
// It contains only fully inlined methods -> no spills -> no prologue/epilogue.
template<typename PrimType, FindFieldType kAccessType>
ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
                                        uint16_t inst_data,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;

  // Try to find the field in the small thread-local cache first.
  InterpreterCache* tls_cache = self->GetInterpreterCache();
  size_t tls_value;
  if (LIKELY(tls_cache->Get(inst, &tls_value))) {
    // The meaning of the cache value is opcode-specific.
    // It is an ArtField* for static fields and the raw field offset for instance fields.
    size_t offset = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
        : tls_value;
    if (kIsDebugBuild) {
      uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
      ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>(
          field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
      DCHECK_EQ(offset, field->GetOffset().SizeValue());
    }
    ObjPtr<mirror::Object> obj = kIsStatic
        ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
        : ObjPtr<mirror::Object>(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
    if (LIKELY(obj != nullptr)) {
      MterpFieldAccess<PrimType, kAccessType>(
          inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false);
      return true;
    }
  }

  // This effectively inlines the fast path from ArtMethod::GetDexCache.
  ArtMethod* referrer = shadow_frame->GetMethod();
  if (LIKELY(!referrer->IsObsolete())) {
    // Avoid read barriers, since we need only the pointer to the native (non-movable)
    // DexCache field array which we can get even through from-space objects.
    ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
    ObjPtr<mirror::DexCache> dex_cache =
        klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();

    // Try to find the desired field in DexCache.
    uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
    ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
    if (LIKELY(field != nullptr)) {
      bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
      if (LIKELY(initialized)) {
        DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
            field_idx, referrer, self, sizeof(PrimType))));
        ObjPtr<mirror::Object> obj = kIsStatic
            ? field->GetDeclaringClass().Ptr()
            : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
        if (LIKELY(kIsStatic || obj != nullptr)) {
          // Only non-volatile fields are allowed in the thread-local cache.
          if (LIKELY(!field->IsVolatile())) {
            if (kIsStatic) {
              tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
            } else {
              tls_cache->Set(inst, field->GetOffset().SizeValue());
            }
          }
          MterpFieldAccess<PrimType, kAccessType>(
              inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
          return true;
        }
      }
    }
  }

  // Slow path. Called last and with identical arguments so that it becomes a
  // single-instruction tail call.
  return MterpFieldAccessSlow<PrimType, kAccessType>(inst, inst_data, shadow_frame, self);
}

#define MTERP_FIELD_ACCESSOR(Name, PrimType, AccessType)                                          \
extern "C" bool Name(Instruction* inst, uint16_t inst_data, ShadowFrame* sf, Thread* self)        \
    REQUIRES_SHARED(Locks::mutator_lock_) {                                                       \
  return MterpFieldAccessFast<PrimType, AccessType>(inst, inst_data, sf, self);                   \
}

#define MTERP_FIELD_ACCESSORS_FOR_TYPE(Suffix, PrimType, Kind)                                    \
  MTERP_FIELD_ACCESSOR(MterpIGet##Suffix, PrimType, Instance##Kind##Read)                         \
  MTERP_FIELD_ACCESSOR(MterpIPut##Suffix, PrimType, Instance##Kind##Write)                        \
  MTERP_FIELD_ACCESSOR(MterpSGet##Suffix, PrimType, Static##Kind##Read)                           \
  MTERP_FIELD_ACCESSOR(MterpSPut##Suffix, PrimType, Static##Kind##Write)

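// Each invocation below stamps out four accessors; e.g. the I8 line expands to
// MterpIGetI8, MterpIPutI8, MterpSGetI8, and MterpSPutI8.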
MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U8, uint8_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(I16, int16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U16, uint16_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U32, uint32_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(U64, uint64_t, Primitive)
MTERP_FIELD_ACCESSORS_FOR_TYPE(Obj, uint32_t, Object)

// Check that the primitive type used for the Obj variant above is correct.
// It really must be a primitive type for the templates to compile.
// In the case of objects, it is only used to get the field size.
static_assert(kHeapReferenceSize == sizeof(uint32_t), "Unexpected kHeapReferenceSize");

#undef MTERP_FIELD_ACCESSORS_FOR_TYPE
#undef MTERP_FIELD_ACCESSOR

    853 extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
    854                                                   int32_t index)
    855     REQUIRES_SHARED(Locks::mutator_lock_) {
    856   if (UNLIKELY(arr == nullptr)) {
    857     ThrowNullPointerExceptionFromInterpreter();
    858     return nullptr;
    859   }
    860   ObjPtr<mirror::ObjectArray<mirror::Object>> array = arr->AsObjectArray<mirror::Object>();
    861   if (LIKELY(array->CheckIsValidIndex(index))) {
    862     return array->GetWithoutChecks(index).Ptr();
    863   } else {
    864     return nullptr;
    865   }
    866 }
    867 
    868 extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj,
    869                                                   uint32_t field_offset)
    870     REQUIRES_SHARED(Locks::mutator_lock_) {
    871   if (UNLIKELY(obj == nullptr)) {
    872     ThrowNullPointerExceptionFromInterpreter();
    873     return nullptr;
    874   }
    875   return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
    876 }
    877 
/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
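// For illustration only (the real thresholds come from the JIT options): with a
// warm-method threshold of 5000 and a current hotness_count of 4990, the
// countdown computed below is 10, so mterp reports back after ten more hotness
// events.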
    884 extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method,
    885                                               ShadowFrame* shadow_frame,
    886                                               Thread* self)
    887     REQUIRES_SHARED(Locks::mutator_lock_) {
    888   uint16_t hotness_count = method->GetCounter();
    889   int32_t countdown_value = jit::kJitHotnessDisabled;
    890   jit::Jit* jit = Runtime::Current()->GetJit();
    891   if (jit != nullptr) {
    892     int32_t warm_threshold = jit->WarmMethodThreshold();
    893     int32_t hot_threshold = jit->HotMethodThreshold();
    894     int32_t osr_threshold = jit->OSRMethodThreshold();
    895     if (hotness_count < warm_threshold) {
    896       countdown_value = warm_threshold - hotness_count;
    897     } else if (hotness_count < hot_threshold) {
    898       countdown_value = hot_threshold - hotness_count;
    899     } else if (hotness_count < osr_threshold) {
    900       countdown_value = osr_threshold - hotness_count;
    901     } else {
    902       countdown_value = jit::kJitCheckForOSR;
    903     }
    904     if (jit::Jit::ShouldUsePriorityThreadWeight(self)) {
    905       int32_t priority_thread_weight = jit->PriorityThreadWeight();
    906       countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    907     }
    908   }
    909   /*
    910    * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
    911    * not a problem, though.  We can just break it down into smaller chunks.
    912    */
    913   countdown_value = std::min(countdown_value,
    914                              static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
    915   shadow_frame->SetCachedHotnessCountdown(countdown_value);
    916   shadow_frame->SetHotnessCountdown(countdown_value);
    917   return countdown_value;
    918 }
    919 
/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value indicating when we should next report.
 */
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges=*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame, self);
}

    936 extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
    937                                                  ShadowFrame* shadow_frame,
    938                                                  int32_t offset)
    939     REQUIRES_SHARED(Locks::mutator_lock_) {
    940   int16_t osr_countdown = shadow_frame->GetCachedHotnessCountdown() - 1;
    941   bool did_osr = false;
    942   /*
    943    * To reduce the cost of polling the compiler to determine whether the requested OSR
    944    * compilation has completed, only check every Nth time.  NOTE: the "osr_countdown <= 0"
    945    * condition is satisfied either by the decrement below or the initial setting of
    946    * the cached countdown field to kJitCheckForOSR, which elsewhere is asserted to be -1.
    947    */
    948   if (osr_countdown <= 0) {
    949     ArtMethod* method = shadow_frame->GetMethod();
    950     JValue* result = shadow_frame->GetResultRegister();
    951     uint32_t dex_pc = shadow_frame->GetDexPC();
    952     jit::Jit* jit = Runtime::Current()->GetJit();
    953     osr_countdown = jit::Jit::kJitRecheckOSRThreshold;
    954     if (offset <= 0) {
    955       // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
    956       jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true);
    957     }
    958     did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
    959   }
    960   shadow_frame->SetCachedHotnessCountdown(osr_countdown);
    961   return did_osr;
    962 }
    963 
}  // namespace interpreter
}  // namespace art