/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "interpreter/interpreter_common.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mterp.h"
#include "debugger.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the 128-byte limit.  This won't tell
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = 128;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(art::FATAL) << "ERROR: unexpected asm interp size " << interp_size
                    << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
  self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
  self->SetMterpCurrentIBase(TraceExecutionEnabled() ?
                             artMterpAsmAltInstructionStart :
                             artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

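/*
 * Illustrative example for MterpDoSparseSwitch (values chosen for exposition,
 * not taken from any dex file): with keys = {3, 10, 100} and targets =
 * {t0, t1, t2}, a lookup with testVal == 10 returns t1, while testVal == 5
 * matches no key and returns kInstrLen (3), so execution falls through past
 * the switch instruction.
 */
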
extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

extern "C" bool MterpShouldSwitchInterpreters()
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
}

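/*
 * Invoke helpers called from the assembly handlers.  Each decodes the invoke
 * instruction at dex_pc_ptr and forwards to DoInvoke (or DoInvokeVirtualQuick
 * for the quickened forms); the template arguments select the invoke kind and
 * whether the /range form of the instruction is used.
 */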
extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
                                 uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualQuickRange(Thread* self, ShadowFrame* shadow_frame,
                                             uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<true>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

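/*
 * Constant-pool and type-check helpers.  MterpConstString, MterpConstClass and
 * MterpCheckCast return true when an exception is now pending, so the assembly
 * handler can branch straight to the exception path.  MterpInstanceOf instead
 * returns the test result and leaves the caller to check for a pending
 * exception.
 */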
extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
                                 Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  String* s = ResolveString(self, *shadow_frame, index);
  if (UNLIKELY(s == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s);
  return false;
}

extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
                                Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c);
  return false;
}

extern "C" bool MterpCheckCast(uint32_t index, StackReference<mirror::Object>* vreg_addr,
                               art::ArtMethod* method, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  Object* obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return true;
  }
  return false;
}

extern "C" bool MterpInstanceOf(uint32_t index, StackReference<mirror::Object>* vreg_addr,
                                art::ArtMethod* method, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return false;  // Caller will check for pending exception.  Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  Object* obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c);
}

extern "C" bool MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return FillArrayData(obj, payload);
}

extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  Object* obj = nullptr;
  Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
                                    self, false, false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      mirror::SetStringCountVisitor visitor(0);
      obj = String::Alloc<true>(self, 0, allocator_type, visitor);
    } else {
      obj = AllocObjectFromCode<false, true>(
          inst->VRegB_21c(), shadow_frame->GetMethod(), self,
          Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return true;
}

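/*
 * Store helpers for sput-object, iput-object and aput-object.  Each returns
 * true on success; a false return tells the assembly handler that the store
 * did not complete.
 */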
extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" bool MterpIputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                     uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}

extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return false;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  ObjectArray<Object>* array = a->AsObjectArray<Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return true;
  }
  return false;
}

extern "C" bool MterpFilledNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                    Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister());
}

extern "C" bool MterpFilledNewArrayRange(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                         Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister());
}

extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                              uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  Object* obj = AllocArrayFromCode<false, true>(
      inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return true;
}

extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame,
                                                                shadow_frame->GetDexPC(),
                                                                instrumentation);
  if (found_dex_pc == DexFile::kDexNoIndex) {
    return false;
  }
  // OK - we can deal with it.  Update and continue.
  shadow_frame->SetDexPC(found_dex_pc);
  return true;
}

extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  TraceExecution(*shadow_frame, inst, shadow_frame->GetDexPC());
}

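/*
 * Diagnostic logging helpers.  Each simply reports the opcode at the current
 * dex pc; none of them changes interpreter state.
 */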
extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Suspend Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" bool MterpSuspendCheck(Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return MterpShouldSwitchInterpreters();
}

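/*
 * Field-write helpers called from the assembly put handlers.  Each returns 0
 * on success and -1 on failure, which the assembly caller checks.
 */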
extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* referrer,
                                               uint64_t* new_value, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
  if (LIKELY(field != nullptr)) {
    // Compiled code can't use transactional mode.
    field->Set64<false>(field->GetDeclaringClass(), *new_value);
    return 0;  // success
  }
  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
  if (LIKELY(field != nullptr)) {
    // Compiled code can't use transactional mode.
    field->Set64<false>(field->GetDeclaringClass(), *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
                                        ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimBoolean) {
      field->SetBoolean<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimByte, type);
      field->SetByte<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
                                         ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int16_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimChar) {
      field->SetChar<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimShort, type);
      field->SetShort<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                         uint32_t new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int32_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set32<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                         uint64_t* new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int64_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set64<false>(obj, *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSetObjInstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                          mirror::Object* new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                  sizeof(mirror::HeapReference<mirror::Object>));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->SetObj<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

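/*
 * Object read helpers: artAGetObjectFromMterp loads an element from an object
 * array and artIGetObjectFromMterp loads an object field at the given offset.
 * Both throw NullPointerException and return nullptr when the base object is
 * null.
 */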
extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  ObjectArray<Object>* array = arr->AsObjectArray<Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index);
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
extern "C" int MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int32_t warm_threshold = jit->WarmMethodThreshold();
    int32_t hot_threshold = jit->HotMethodThreshold();
    int32_t osr_threshold = jit->OSRMethodThreshold();
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
    if (jit::Jit::ShouldUsePriorityThreadWeight()) {
      int32_t priority_thread_weight = jit->PriorityThreadWeight();
      countdown_value = std::min(countdown_value, countdown_value / priority_thread_weight);
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
   * not a problem, though.  We can just break it down into smaller chunks.
   */
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}

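/*
 * Illustrative example for MterpSetUpHotnessCountdown (threshold values chosen
 * for exposition only): with warm, hot and OSR thresholds of 500, 1000 and
 * 2000 and a current hotness count of 700, the countdown becomes
 * 1000 - 700 = 300 events before the next batch report, subject to the
 * priority-thread scaling and the int16_t cap above.
 */
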
/*
 * Report a batch of hotness events to the instrumentation and then return the new
 * countdown value to the next time we should report.
 */
extern "C" int16_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->AddSamples(self, method, count, /*with_backedges*/ true);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame);
}

// TUNING: Unused by arm/arm64/x86/x86_64.  Remove when mips/mips64 mterps support batch updates.
extern "C" bool MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtMethod* method = shadow_frame->GetMethod();
  JValue* result = shadow_frame->GetResultRegister();
  uint32_t dex_pc = shadow_frame->GetDexPC();
  jit::Jit* jit = Runtime::Current()->GetJit();
  if ((jit != nullptr) && (offset <= 0)) {
    jit->AddSamples(self, method, 1, /*with_backedges*/ true);
  }
  int16_t countdown_value = MterpSetUpHotnessCountdown(method, shadow_frame);
  if (countdown_value == jit::kJitCheckForOSR) {
    return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  } else {
    return false;
  }
}

extern "C" bool MterpMaybeDoOnStackReplacement(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtMethod* method = shadow_frame->GetMethod();
  JValue* result = shadow_frame->GetResultRegister();
  uint32_t dex_pc = shadow_frame->GetDexPC();
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (offset <= 0) {
    // Keep updating hotness in case a compilation request was dropped.  Eventually it will retry.
    jit->AddSamples(self, method, 1, /*with_backedges*/ true);
  }
  // Assumes caller has already determined that an OSR check is appropriate.
  return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
}

}  // namespace interpreter
}  // namespace art