1 /* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "thread.h" 18 19 #if !defined(__APPLE__) 20 #include <sched.h> 21 #endif 22 23 #include <pthread.h> 24 #include <signal.h> 25 #include <sys/resource.h> 26 #include <sys/time.h> 27 28 #include <algorithm> 29 #include <bitset> 30 #include <cerrno> 31 #include <iostream> 32 #include <list> 33 #include <sstream> 34 35 #include "android-base/stringprintf.h" 36 37 #include "arch/context-inl.h" 38 #include "arch/context.h" 39 #include "art_field-inl.h" 40 #include "art_method-inl.h" 41 #include "base/bit_utils.h" 42 #include "base/memory_tool.h" 43 #include "base/mutex.h" 44 #include "base/systrace.h" 45 #include "base/timing_logger.h" 46 #include "base/to_str.h" 47 #include "class_linker-inl.h" 48 #include "debugger.h" 49 #include "dex_file-inl.h" 50 #include "dex_file_annotations.h" 51 #include "entrypoints/entrypoint_utils.h" 52 #include "entrypoints/quick/quick_alloc_entrypoints.h" 53 #include "gc/accounting/card_table-inl.h" 54 #include "gc/accounting/heap_bitmap-inl.h" 55 #include "gc/allocator/rosalloc.h" 56 #include "gc/heap.h" 57 #include "gc/space/space-inl.h" 58 #include "gc_root.h" 59 #include "handle_scope-inl.h" 60 #include "indirect_reference_table-inl.h" 61 #include "interpreter/interpreter.h" 62 #include "interpreter/shadow_frame.h" 63 #include "java_frame_root_info.h" 64 #include "java_vm_ext.h" 65 #include "jni_internal.h" 66 #include "mirror/class-inl.h" 67 #include "mirror/class_loader.h" 68 #include "mirror/object_array-inl.h" 69 #include "mirror/stack_trace_element.h" 70 #include "monitor.h" 71 #include "native_stack_dump.h" 72 #include "nativehelper/ScopedLocalRef.h" 73 #include "nativehelper/ScopedUtfChars.h" 74 #include "nth_caller_visitor.h" 75 #include "oat_quick_method_header.h" 76 #include "obj_ptr-inl.h" 77 #include "object_lock.h" 78 #include "quick/quick_method_frame_info.h" 79 #include "quick_exception_handler.h" 80 #include "read_barrier-inl.h" 81 #include "reflection.h" 82 #include "runtime.h" 83 #include "runtime_callbacks.h" 84 #include "scoped_thread_state_change-inl.h" 85 #include "stack.h" 86 #include "stack_map.h" 87 #include "thread-inl.h" 88 #include "thread_list.h" 89 #include "utils.h" 90 #include "verifier/method_verifier.h" 91 #include "verify_object.h" 92 #include "well_known_classes.h" 93 94 #if ART_USE_FUTEXES 95 #include "linux/futex.h" 96 #include "sys/syscall.h" 97 #ifndef SYS_futex 98 #define SYS_futex __NR_futex 99 #endif 100 #endif // ART_USE_FUTEXES 101 102 namespace art { 103 104 using android::base::StringAppendV; 105 using android::base::StringPrintf; 106 107 extern "C" NO_RETURN void artDeoptimize(Thread* self); 108 109 bool Thread::is_started_ = false; 110 pthread_key_t Thread::pthread_key_self_; 111 ConditionVariable* Thread::resume_cond_ = nullptr; 112 const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA); 113 bool 
(*Thread::is_sensitive_thread_hook_)() = nullptr; 114 Thread* Thread::jit_sensitive_thread_ = nullptr; 115 116 static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild; 117 118 // For implicit overflow checks we reserve an extra piece of memory at the bottom 119 // of the stack (lowest memory). The higher portion of the memory 120 // is protected against reads and the lower is available for use while 121 // throwing the StackOverflow exception. 122 constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB; 123 124 static const char* kThreadNameDuringStartup = "<native thread without managed peer>"; 125 126 void Thread::InitCardTable() { 127 tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin(); 128 } 129 130 static void UnimplementedEntryPoint() { 131 UNIMPLEMENTED(FATAL); 132 } 133 134 void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints); 135 void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active); 136 137 void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) { 138 CHECK(kUseReadBarrier); 139 tls32_.is_gc_marking = is_marking; 140 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking); 141 ResetQuickAllocEntryPointsForThread(is_marking); 142 } 143 144 void Thread::InitTlsEntryPoints() { 145 // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 146 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints); 147 uintptr_t* end = reinterpret_cast<uintptr_t*>( 148 reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints)); 149 for (uintptr_t* it = begin; it != end; ++it) { 150 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); 151 } 152 InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints); 153 } 154 155 void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) { 156 if (kUseReadBarrier && kRuntimeISA != kX86_64) { 157 // Allocation entrypoint switching is currently only implemented for X86_64. 158 is_marking = true; 159 } 160 ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking); 161 } 162 163 class DeoptimizationContextRecord { 164 public: 165 DeoptimizationContextRecord(const JValue& ret_val, 166 bool is_reference, 167 bool from_code, 168 ObjPtr<mirror::Throwable> pending_exception, 169 DeoptimizationContextRecord* link) 170 : ret_val_(ret_val), 171 is_reference_(is_reference), 172 from_code_(from_code), 173 pending_exception_(pending_exception.Ptr()), 174 link_(link) {} 175 176 JValue GetReturnValue() const { return ret_val_; } 177 bool IsReference() const { return is_reference_; } 178 bool GetFromCode() const { return from_code_; } 179 ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; } 180 DeoptimizationContextRecord* GetLink() const { return link_; } 181 mirror::Object** GetReturnValueAsGCRoot() { 182 DCHECK(is_reference_); 183 return ret_val_.GetGCRoot(); 184 } 185 mirror::Object** GetPendingExceptionAsGCRoot() { 186 return reinterpret_cast<mirror::Object**>(&pending_exception_); 187 } 188 189 private: 190 // The value returned by the method at the top of the stack before deoptimization. 191 JValue ret_val_; 192 193 // Indicates whether the returned value is a reference. If so, the GC will visit it. 194 const bool is_reference_; 195 196 // Whether the context was created from an explicit deoptimization in the code. 
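  // (In practice "from code" distinguishes deoptimization entered from compiled code, e.g.
  //  through artDeoptimize declared above, from deoptimization requested externally, such as
  //  by the debugger.)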
197 const bool from_code_; 198 199 // The exception that was pending before deoptimization (or null if there was no pending 200 // exception). 201 mirror::Throwable* pending_exception_; 202 203 // A link to the previous DeoptimizationContextRecord. 204 DeoptimizationContextRecord* const link_; 205 206 DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord); 207 }; 208 209 class StackedShadowFrameRecord { 210 public: 211 StackedShadowFrameRecord(ShadowFrame* shadow_frame, 212 StackedShadowFrameType type, 213 StackedShadowFrameRecord* link) 214 : shadow_frame_(shadow_frame), 215 type_(type), 216 link_(link) {} 217 218 ShadowFrame* GetShadowFrame() const { return shadow_frame_; } 219 StackedShadowFrameType GetType() const { return type_; } 220 StackedShadowFrameRecord* GetLink() const { return link_; } 221 222 private: 223 ShadowFrame* const shadow_frame_; 224 const StackedShadowFrameType type_; 225 StackedShadowFrameRecord* const link_; 226 227 DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord); 228 }; 229 230 void Thread::PushDeoptimizationContext(const JValue& return_value, 231 bool is_reference, 232 bool from_code, 233 ObjPtr<mirror::Throwable> exception) { 234 DeoptimizationContextRecord* record = new DeoptimizationContextRecord( 235 return_value, 236 is_reference, 237 from_code, 238 exception, 239 tlsPtr_.deoptimization_context_stack); 240 tlsPtr_.deoptimization_context_stack = record; 241 } 242 243 void Thread::PopDeoptimizationContext(JValue* result, 244 ObjPtr<mirror::Throwable>* exception, 245 bool* from_code) { 246 AssertHasDeoptimizationContext(); 247 DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; 248 tlsPtr_.deoptimization_context_stack = record->GetLink(); 249 result->SetJ(record->GetReturnValue().GetJ()); 250 *exception = record->GetPendingException(); 251 *from_code = record->GetFromCode(); 252 delete record; 253 } 254 255 void Thread::AssertHasDeoptimizationContext() { 256 CHECK(tlsPtr_.deoptimization_context_stack != nullptr) 257 << "No deoptimization context for thread " << *this; 258 } 259 260 void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) { 261 StackedShadowFrameRecord* record = new StackedShadowFrameRecord( 262 sf, type, tlsPtr_.stacked_shadow_frame_record); 263 tlsPtr_.stacked_shadow_frame_record = record; 264 } 265 266 ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) { 267 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record; 268 if (must_be_present) { 269 DCHECK(record != nullptr); 270 } else { 271 if (record == nullptr || record->GetType() != type) { 272 return nullptr; 273 } 274 } 275 tlsPtr_.stacked_shadow_frame_record = record->GetLink(); 276 ShadowFrame* shadow_frame = record->GetShadowFrame(); 277 delete record; 278 return shadow_frame; 279 } 280 281 class FrameIdToShadowFrame { 282 public: 283 static FrameIdToShadowFrame* Create(size_t frame_id, 284 ShadowFrame* shadow_frame, 285 FrameIdToShadowFrame* next, 286 size_t num_vregs) { 287 // Append a bool array at the end to keep track of what vregs are updated by the debugger. 
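    // The flags occupy the trailing updated_vreg_flags_[0] member, so the record and its
    // per-vreg bools form a single allocation; Delete() mirrors this by freeing the raw byte
    // array instead of calling operator delete on a FrameIdToShadowFrame.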
288 uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs]; 289 return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next); 290 } 291 292 static void Delete(FrameIdToShadowFrame* f) { 293 uint8_t* memory = reinterpret_cast<uint8_t*>(f); 294 delete[] memory; 295 } 296 297 size_t GetFrameId() const { return frame_id_; } 298 ShadowFrame* GetShadowFrame() const { return shadow_frame_; } 299 FrameIdToShadowFrame* GetNext() const { return next_; } 300 void SetNext(FrameIdToShadowFrame* next) { next_ = next; } 301 bool* GetUpdatedVRegFlags() { 302 return updated_vreg_flags_; 303 } 304 305 private: 306 FrameIdToShadowFrame(size_t frame_id, 307 ShadowFrame* shadow_frame, 308 FrameIdToShadowFrame* next) 309 : frame_id_(frame_id), 310 shadow_frame_(shadow_frame), 311 next_(next) {} 312 313 const size_t frame_id_; 314 ShadowFrame* const shadow_frame_; 315 FrameIdToShadowFrame* next_; 316 bool updated_vreg_flags_[0]; 317 318 DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame); 319 }; 320 321 static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head, 322 size_t frame_id) { 323 FrameIdToShadowFrame* found = nullptr; 324 for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) { 325 if (record->GetFrameId() == frame_id) { 326 if (kIsDebugBuild) { 327 // Sanity check we have at most one record for this frame. 328 CHECK(found == nullptr) << "Multiple records for the frame " << frame_id; 329 found = record; 330 } else { 331 return record; 332 } 333 } 334 } 335 return found; 336 } 337 338 ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) { 339 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame( 340 tlsPtr_.frame_id_to_shadow_frame, frame_id); 341 if (record != nullptr) { 342 return record->GetShadowFrame(); 343 } 344 return nullptr; 345 } 346 347 // Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr. 348 bool* Thread::GetUpdatedVRegFlags(size_t frame_id) { 349 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame( 350 tlsPtr_.frame_id_to_shadow_frame, frame_id); 351 CHECK(record != nullptr); 352 return record->GetUpdatedVRegFlags(); 353 } 354 355 ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id, 356 uint32_t num_vregs, 357 ArtMethod* method, 358 uint32_t dex_pc) { 359 ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id); 360 if (shadow_frame != nullptr) { 361 return shadow_frame; 362 } 363 VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method); 364 shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc); 365 FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id, 366 shadow_frame, 367 tlsPtr_.frame_id_to_shadow_frame, 368 num_vregs); 369 for (uint32_t i = 0; i < num_vregs; i++) { 370 // Do this to clear all references for root visitors. 371 shadow_frame->SetVRegReference(i, nullptr); 372 // This flag will be changed to true if the debugger modifies the value. 
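    // (Only vregs whose flag ends up true are meant to override the values reconstructed
    //  from the optimized frame when the deoptimization actually runs.)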
373 record->GetUpdatedVRegFlags()[i] = false; 374 } 375 tlsPtr_.frame_id_to_shadow_frame = record; 376 return shadow_frame; 377 } 378 379 void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) { 380 FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame; 381 if (head->GetFrameId() == frame_id) { 382 tlsPtr_.frame_id_to_shadow_frame = head->GetNext(); 383 FrameIdToShadowFrame::Delete(head); 384 return; 385 } 386 FrameIdToShadowFrame* prev = head; 387 for (FrameIdToShadowFrame* record = head->GetNext(); 388 record != nullptr; 389 prev = record, record = record->GetNext()) { 390 if (record->GetFrameId() == frame_id) { 391 prev->SetNext(record->GetNext()); 392 FrameIdToShadowFrame::Delete(record); 393 return; 394 } 395 } 396 LOG(FATAL) << "No shadow frame for frame " << frame_id; 397 UNREACHABLE(); 398 } 399 400 void Thread::InitTid() { 401 tls32_.tid = ::art::GetTid(); 402 } 403 404 void Thread::InitAfterFork() { 405 // One thread (us) survived the fork, but we have a new tid so we need to 406 // update the value stashed in this Thread*. 407 InitTid(); 408 } 409 410 void* Thread::CreateCallback(void* arg) { 411 Thread* self = reinterpret_cast<Thread*>(arg); 412 Runtime* runtime = Runtime::Current(); 413 if (runtime == nullptr) { 414 LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self; 415 return nullptr; 416 } 417 { 418 // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true 419 // after self->Init(). 420 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); 421 // Check that if we got here we cannot be shutting down (as shutdown should never have started 422 // while threads are being born). 423 CHECK(!runtime->IsShuttingDownLocked()); 424 // Note: given that the JNIEnv is created in the parent thread, the only failure point here is 425 // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort 426 // the runtime in such a case. In case this ever changes, we need to make sure here to 427 // delete the tmp_jni_env, as we own it at this point. 428 CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env)); 429 self->tlsPtr_.tmp_jni_env = nullptr; 430 Runtime::Current()->EndThreadBirth(); 431 } 432 { 433 ScopedObjectAccess soa(self); 434 self->InitStringEntryPoints(); 435 436 // Copy peer into self, deleting global reference when done. 437 CHECK(self->tlsPtr_.jpeer != nullptr); 438 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr(); 439 self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer); 440 self->tlsPtr_.jpeer = nullptr; 441 self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str()); 442 443 ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority); 444 self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer)); 445 446 runtime->GetRuntimeCallbacks()->ThreadStart(self); 447 448 // Invoke the 'run' method of our java.lang.Thread. 449 ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer; 450 jmethodID mid = WellKnownClasses::java_lang_Thread_run; 451 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); 452 InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr); 453 } 454 // Detach and delete self. 
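  // (ThreadList::Unregister is responsible for the delete; see the matching note in
  //  Thread::Attach below.)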
455 Runtime::Current()->GetThreadList()->Unregister(self); 456 457 return nullptr; 458 } 459 460 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, 461 ObjPtr<mirror::Object> thread_peer) { 462 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer); 463 Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer))); 464 // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ 465 // to stop it from going away. 466 if (kIsDebugBuild) { 467 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 468 if (result != nullptr && !result->IsSuspended()) { 469 Locks::thread_list_lock_->AssertHeld(soa.Self()); 470 } 471 } 472 return result; 473 } 474 475 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, 476 jobject java_thread) { 477 return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread).Ptr()); 478 } 479 480 static size_t FixStackSize(size_t stack_size) { 481 // A stack size of zero means "use the default". 482 if (stack_size == 0) { 483 stack_size = Runtime::Current()->GetDefaultStackSize(); 484 } 485 486 // Dalvik used the bionic pthread default stack size for native threads, 487 // so include that here to support apps that expect large native stacks. 488 stack_size += 1 * MB; 489 490 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN. 491 if (stack_size < PTHREAD_STACK_MIN) { 492 stack_size = PTHREAD_STACK_MIN; 493 } 494 495 if (Runtime::Current()->ExplicitStackOverflowChecks()) { 496 // It's likely that callers are trying to ensure they have at least a certain amount of 497 // stack space, so we should add our reserved space on top of what they requested, rather 498 // than implicitly take it away from them. 499 stack_size += GetStackOverflowReservedBytes(kRuntimeISA); 500 } else { 501 // If we are going to use implicit stack checks, allocate space for the protected 502 // region at the bottom of the stack. 503 stack_size += Thread::kStackOverflowImplicitCheckSize + 504 GetStackOverflowReservedBytes(kRuntimeISA); 505 } 506 507 // Some systems require the stack size to be a multiple of the system page size, so round up. 508 stack_size = RoundUp(stack_size, kPageSize); 509 510 return stack_size; 511 } 512 513 // Return the nearest page-aligned address below the current stack top. 514 NO_INLINE 515 static uint8_t* FindStackTop() { 516 return reinterpret_cast<uint8_t*>( 517 AlignDown(__builtin_frame_address(0), kPageSize)); 518 } 519 520 // Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack 521 // overflow is detected. It is located right below the stack_begin_. 522 ATTRIBUTE_NO_SANITIZE_ADDRESS 523 void Thread::InstallImplicitProtection() { 524 uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 525 // Page containing current top of stack. 526 uint8_t* stack_top = FindStackTop(); 527 528 // Try to directly protect the stack. 529 VLOG(threads) << "installing stack protected region at " << std::hex << 530 static_cast<void*>(pregion) << " to " << 531 static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); 532 if (ProtectStack(/* fatal_on_error */ false)) { 533 // Tell the kernel that we won't be needing these pages any more. 534 // NB. madvise will probably write zeroes into the memory (on linux it does). 
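    // The discarded range runs from the protected region up to one page below the page
    // holding the current stack pointer, leaving a page of slack under the live frames.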
535 uint32_t unwanted_size = stack_top - pregion - kPageSize; 536 madvise(pregion, unwanted_size, MADV_DONTNEED); 537 return; 538 } 539 540 // There is a little complexity here that deserves a special mention. On some 541 // architectures, the stack is created using a VM_GROWSDOWN flag 542 // to prevent memory being allocated when it's not needed. This flag makes the 543 // kernel only allocate memory for the stack by growing down in memory. Because we 544 // want to put an mprotected region far away from that at the stack top, we need 545 // to make sure the pages for the stack are mapped in before we call mprotect. 546 // 547 // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN 548 // with a non-mapped stack (usually only the main thread). 549 // 550 // We map in the stack by reading every page from the stack bottom (highest address) 551 // to the stack top. (We then madvise this away.) This must be done by reading from the 552 // current stack pointer downwards. 553 // 554 // Accesses too far below the current machine register corresponding to the stack pointer (e.g., 555 // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We 556 // thus have to move the stack pointer. We do this portably by using a recursive function with a 557 // large stack frame size. 558 559 // (Defensively) first remove the protection on the protected region as we'll want to read 560 // and write it. Ignore errors. 561 UnprotectStack(); 562 563 VLOG(threads) << "Need to map in stack for thread at " << std::hex << 564 static_cast<void*>(pregion); 565 566 struct RecurseDownStack { 567 // This function has an intentionally large stack size. 568 #pragma GCC diagnostic push 569 #pragma GCC diagnostic ignored "-Wframe-larger-than=" 570 NO_INLINE 571 static void Touch(uintptr_t target) { 572 volatile size_t zero = 0; 573 // Use a large local volatile array to ensure a large frame size. Do not use anything close 574 // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but 575 // there is no pragma support for this. 576 // Note: for ASAN we need to shrink the array a bit, as there's other overhead. 577 constexpr size_t kAsanMultiplier = 578 #ifdef ADDRESS_SANITIZER 579 2u; 580 #else 581 1u; 582 #endif 583 volatile char space[kPageSize - (kAsanMultiplier * 256)]; 584 char sink ATTRIBUTE_UNUSED = space[zero]; 585 if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) { 586 Touch(target); 587 } 588 zero *= 2; // Try to avoid tail recursion. 589 } 590 #pragma GCC diagnostic pop 591 }; 592 RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion)); 593 594 VLOG(threads) << "(again) installing stack protected region at " << std::hex << 595 static_cast<void*>(pregion) << " to " << 596 static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); 597 598 // Protect the bottom of the stack to prevent read/write to it. 599 ProtectStack(/* fatal_on_error */ true); 600 601 // Tell the kernel that we won't be needing these pages any more. 602 // NB. madvise will probably write zeroes into the memory (on linux it does). 
603 uint32_t unwanted_size = stack_top - pregion - kPageSize; 604 madvise(pregion, unwanted_size, MADV_DONTNEED); 605 } 606 607 void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) { 608 CHECK(java_peer != nullptr); 609 Thread* self = static_cast<JNIEnvExt*>(env)->self; 610 611 if (VLOG_IS_ON(threads)) { 612 ScopedObjectAccess soa(env); 613 614 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); 615 ObjPtr<mirror::String> java_name = 616 f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString(); 617 std::string thread_name; 618 if (java_name != nullptr) { 619 thread_name = java_name->ToModifiedUtf8(); 620 } else { 621 thread_name = "(Unnamed)"; 622 } 623 624 VLOG(threads) << "Creating native thread for " << thread_name; 625 self->Dump(LOG_STREAM(INFO)); 626 } 627 628 Runtime* runtime = Runtime::Current(); 629 630 // Atomically start the birth of the thread ensuring the runtime isn't shutting down. 631 bool thread_start_during_shutdown = false; 632 { 633 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 634 if (runtime->IsShuttingDownLocked()) { 635 thread_start_during_shutdown = true; 636 } else { 637 runtime->StartThreadBirth(); 638 } 639 } 640 if (thread_start_during_shutdown) { 641 ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError")); 642 env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown"); 643 return; 644 } 645 646 Thread* child_thread = new Thread(is_daemon); 647 // Use global JNI ref to hold peer live while child thread starts. 648 child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer); 649 stack_size = FixStackSize(stack_size); 650 651 // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to 652 // assign it. 653 env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 654 reinterpret_cast<jlong>(child_thread)); 655 656 // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and 657 // do not have a good way to report this on the child's side. 658 std::string error_msg; 659 std::unique_ptr<JNIEnvExt> child_jni_env_ext( 660 JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg)); 661 662 int pthread_create_result = 0; 663 if (child_jni_env_ext.get() != nullptr) { 664 pthread_t new_pthread; 665 pthread_attr_t attr; 666 child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get(); 667 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); 668 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), 669 "PTHREAD_CREATE_DETACHED"); 670 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); 671 pthread_create_result = pthread_create(&new_pthread, 672 &attr, 673 Thread::CreateCallback, 674 child_thread); 675 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); 676 677 if (pthread_create_result == 0) { 678 // pthread_create started the new thread. The child is now responsible for managing the 679 // JNIEnvExt we created. 680 // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization 681 // between the threads. 682 child_jni_env_ext.release(); 683 return; 684 } 685 } 686 687 // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up. 688 { 689 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 690 runtime->EndThreadBirth(); 691 } 692 // Manually delete the global reference since Thread::Init will not have been run. 
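  // (On the success path, CreateCallback decodes jpeer into opeer and deletes the global
  //  reference itself; here the child never ran, so the parent has to clean up.)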
693 env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer); 694 child_thread->tlsPtr_.jpeer = nullptr; 695 delete child_thread; 696 child_thread = nullptr; 697 // TODO: remove from thread group? 698 env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0); 699 { 700 std::string msg(child_jni_env_ext.get() == nullptr ? 701 StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) : 702 StringPrintf("pthread_create (%s stack) failed: %s", 703 PrettySize(stack_size).c_str(), strerror(pthread_create_result))); 704 ScopedObjectAccess soa(env); 705 soa.Self()->ThrowOutOfMemoryError(msg.c_str()); 706 } 707 } 708 709 bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) { 710 // This function does all the initialization that must be run by the native thread it applies to. 711 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so 712 // we can handshake with the corresponding native thread when it's ready.) Check this native 713 // thread hasn't been through here already... 714 CHECK(Thread::Current() == nullptr); 715 716 // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this 717 // avoids pthread_self_ ever being invalid when discovered from Thread::Current(). 718 tlsPtr_.pthread_self = pthread_self(); 719 CHECK(is_started_); 720 721 SetUpAlternateSignalStack(); 722 if (!InitStackHwm()) { 723 return false; 724 } 725 InitCpu(); 726 InitTlsEntryPoints(); 727 RemoveSuspendTrigger(); 728 InitCardTable(); 729 InitTid(); 730 interpreter::InitInterpreterTls(this); 731 732 #ifdef ART_TARGET_ANDROID 733 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this; 734 #else 735 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self"); 736 #endif 737 DCHECK_EQ(Thread::Current(), this); 738 739 tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this); 740 741 if (jni_env_ext != nullptr) { 742 DCHECK_EQ(jni_env_ext->vm, java_vm); 743 DCHECK_EQ(jni_env_ext->self, this); 744 tlsPtr_.jni_env = jni_env_ext; 745 } else { 746 std::string error_msg; 747 tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg); 748 if (tlsPtr_.jni_env == nullptr) { 749 LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg; 750 return false; 751 } 752 } 753 754 thread_list->Register(this); 755 return true; 756 } 757 758 template <typename PeerAction> 759 Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) { 760 Runtime* runtime = Runtime::Current(); 761 if (runtime == nullptr) { 762 LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name; 763 return nullptr; 764 } 765 Thread* self; 766 { 767 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); 768 if (runtime->IsShuttingDownLocked()) { 769 LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name; 770 return nullptr; 771 } else { 772 Runtime::Current()->StartThreadBirth(); 773 self = new Thread(as_daemon); 774 bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM()); 775 Runtime::Current()->EndThreadBirth(); 776 if (!init_success) { 777 delete self; 778 return nullptr; 779 } 780 } 781 } 782 783 self->InitStringEntryPoints(); 784 785 CHECK_NE(self->GetState(), kRunnable); 786 self->SetState(kNative); 787 788 // Run the action that is acting on the peer. 789 if (!peer_action(self)) { 790 runtime->GetThreadList()->Unregister(self); 791 // Unregister deletes self, no need to do this here. 
792 return nullptr; 793 } 794 795 if (VLOG_IS_ON(threads)) { 796 if (thread_name != nullptr) { 797 VLOG(threads) << "Attaching thread " << thread_name; 798 } else { 799 VLOG(threads) << "Attaching unnamed thread."; 800 } 801 ScopedObjectAccess soa(self); 802 self->Dump(LOG_STREAM(INFO)); 803 } 804 805 { 806 ScopedObjectAccess soa(self); 807 runtime->GetRuntimeCallbacks()->ThreadStart(self); 808 } 809 810 return self; 811 } 812 813 Thread* Thread::Attach(const char* thread_name, 814 bool as_daemon, 815 jobject thread_group, 816 bool create_peer) { 817 auto create_peer_action = [&](Thread* self) { 818 // If we're the main thread, ClassLinker won't be created until after we're attached, 819 // so that thread needs a two-stage attach. Regular threads don't need this hack. 820 // In the compiler, all threads need this hack, because no-one's going to be getting 821 // a native peer! 822 if (create_peer) { 823 self->CreatePeer(thread_name, as_daemon, thread_group); 824 if (self->IsExceptionPending()) { 825 // We cannot keep the exception around, as we're deleting self. Try to be helpful and log it. 826 { 827 ScopedObjectAccess soa(self); 828 LOG(ERROR) << "Exception creating thread peer:"; 829 LOG(ERROR) << self->GetException()->Dump(); 830 self->ClearException(); 831 } 832 return false; 833 } 834 } else { 835 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools. 836 if (thread_name != nullptr) { 837 self->tlsPtr_.name->assign(thread_name); 838 ::art::SetThreadName(thread_name); 839 } else if (self->GetJniEnv()->check_jni) { 840 LOG(WARNING) << *Thread::Current() << " attached without supplying a name"; 841 } 842 } 843 return true; 844 }; 845 return Attach(thread_name, as_daemon, create_peer_action); 846 } 847 848 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) { 849 auto set_peer_action = [&](Thread* self) { 850 // Install the given peer. 
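    // (This overload is for callers that already have a java.lang.Thread object for the
    //  attaching thread, unlike the create_peer path in the overload above, which allocates
    //  a fresh peer via CreatePeer.)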
851 { 852 DCHECK(self == Thread::Current()); 853 ScopedObjectAccess soa(self); 854 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr(); 855 } 856 self->GetJniEnv()->SetLongField(thread_peer, 857 WellKnownClasses::java_lang_Thread_nativePeer, 858 reinterpret_cast<jlong>(self)); 859 return true; 860 }; 861 return Attach(thread_name, as_daemon, set_peer_action); 862 } 863 864 void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) { 865 Runtime* runtime = Runtime::Current(); 866 CHECK(runtime->IsStarted()); 867 JNIEnv* env = tlsPtr_.jni_env; 868 869 if (thread_group == nullptr) { 870 thread_group = runtime->GetMainThreadGroup(); 871 } 872 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 873 // Add missing null check in case of OOM b/18297817 874 if (name != nullptr && thread_name.get() == nullptr) { 875 CHECK(IsExceptionPending()); 876 return; 877 } 878 jint thread_priority = GetNativePriority(); 879 jboolean thread_is_daemon = as_daemon; 880 881 ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); 882 if (peer.get() == nullptr) { 883 CHECK(IsExceptionPending()); 884 return; 885 } 886 { 887 ScopedObjectAccess soa(this); 888 tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr(); 889 } 890 env->CallNonvirtualVoidMethod(peer.get(), 891 WellKnownClasses::java_lang_Thread, 892 WellKnownClasses::java_lang_Thread_init, 893 thread_group, thread_name.get(), thread_priority, thread_is_daemon); 894 if (IsExceptionPending()) { 895 return; 896 } 897 898 Thread* self = this; 899 DCHECK_EQ(self, Thread::Current()); 900 env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer, 901 reinterpret_cast<jlong>(self)); 902 903 ScopedObjectAccess soa(self); 904 StackHandleScope<1> hs(self); 905 MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName())); 906 if (peer_thread_name == nullptr) { 907 // The Thread constructor should have set the Thread.name to a 908 // non-null value. However, because we can run without code 909 // available (in the compiler, in tests), we manually assign the 910 // fields the constructor should have set. 911 if (runtime->IsActiveTransaction()) { 912 InitPeer<true>(soa, 913 tlsPtr_.opeer, 914 thread_is_daemon, 915 thread_group, 916 thread_name.get(), 917 thread_priority); 918 } else { 919 InitPeer<false>(soa, 920 tlsPtr_.opeer, 921 thread_is_daemon, 922 thread_group, 923 thread_name.get(), 924 thread_priority); 925 } 926 peer_thread_name.Assign(GetThreadName()); 927 } 928 // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. 
929 if (peer_thread_name != nullptr) { 930 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str()); 931 } 932 } 933 934 jobject Thread::CreateCompileTimePeer(JNIEnv* env, 935 const char* name, 936 bool as_daemon, 937 jobject thread_group) { 938 Runtime* runtime = Runtime::Current(); 939 CHECK(!runtime->IsStarted()); 940 941 if (thread_group == nullptr) { 942 thread_group = runtime->GetMainThreadGroup(); 943 } 944 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 945 // Add missing null check in case of OOM b/18297817 946 if (name != nullptr && thread_name.get() == nullptr) { 947 CHECK(Thread::Current()->IsExceptionPending()); 948 return nullptr; 949 } 950 jint thread_priority = GetNativePriority(); 951 jboolean thread_is_daemon = as_daemon; 952 953 ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); 954 if (peer.get() == nullptr) { 955 CHECK(Thread::Current()->IsExceptionPending()); 956 return nullptr; 957 } 958 959 // We cannot call Thread.init, as it will recursively ask for currentThread. 960 961 // The Thread constructor should have set the Thread.name to a 962 // non-null value. However, because we can run without code 963 // available (in the compiler, in tests), we manually assign the 964 // fields the constructor should have set. 965 ScopedObjectAccessUnchecked soa(Thread::Current()); 966 if (runtime->IsActiveTransaction()) { 967 InitPeer<true>(soa, 968 soa.Decode<mirror::Object>(peer.get()), 969 thread_is_daemon, 970 thread_group, 971 thread_name.get(), 972 thread_priority); 973 } else { 974 InitPeer<false>(soa, 975 soa.Decode<mirror::Object>(peer.get()), 976 thread_is_daemon, 977 thread_group, 978 thread_name.get(), 979 thread_priority); 980 } 981 982 return peer.release(); 983 } 984 985 template<bool kTransactionActive> 986 void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa, 987 ObjPtr<mirror::Object> peer, 988 jboolean thread_is_daemon, 989 jobject thread_group, 990 jobject thread_name, 991 jint thread_priority) { 992 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)-> 993 SetBoolean<kTransactionActive>(peer, thread_is_daemon); 994 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)-> 995 SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group)); 996 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)-> 997 SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name)); 998 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)-> 999 SetInt<kTransactionActive>(peer, thread_priority); 1000 } 1001 1002 void Thread::SetThreadName(const char* name) { 1003 tlsPtr_.name->assign(name); 1004 ::art::SetThreadName(name); 1005 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM")); 1006 } 1007 1008 static void GetThreadStack(pthread_t thread, 1009 void** stack_base, 1010 size_t* stack_size, 1011 size_t* guard_size) { 1012 #if defined(__APPLE__) 1013 *stack_size = pthread_get_stacksize_np(thread); 1014 void* stack_addr = pthread_get_stackaddr_np(thread); 1015 1016 // Check whether stack_addr is the base or end of the stack. 1017 // (On Mac OS 10.7, it's the end.) 1018 int stack_variable; 1019 if (stack_addr > &stack_variable) { 1020 *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size; 1021 } else { 1022 *stack_base = stack_addr; 1023 } 1024 1025 // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac. 
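  // (pthread_attr_init yields the default guard size of a fresh attribute object, which need
  //  not match the guard size this thread's stack was actually created with.)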
1026 pthread_attr_t attributes; 1027 CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__); 1028 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__); 1029 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__); 1030 #else 1031 pthread_attr_t attributes; 1032 CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__); 1033 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__); 1034 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__); 1035 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__); 1036 1037 #if defined(__GLIBC__) 1038 // If we're the main thread, check whether we were run with an unlimited stack. In that case, 1039 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection 1040 // will be broken because we'll die long before we get close to 2GB. 1041 bool is_main_thread = (::art::GetTid() == getpid()); 1042 if (is_main_thread) { 1043 rlimit stack_limit; 1044 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) { 1045 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed"; 1046 } 1047 if (stack_limit.rlim_cur == RLIM_INFINITY) { 1048 size_t old_stack_size = *stack_size; 1049 1050 // Use the kernel default limit as our size, and adjust the base to match. 1051 *stack_size = 8 * MB; 1052 *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size); 1053 1054 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")" 1055 << " to " << PrettySize(*stack_size) 1056 << " with base " << *stack_base; 1057 } 1058 } 1059 #endif 1060 1061 #endif 1062 } 1063 1064 bool Thread::InitStackHwm() { 1065 void* read_stack_base; 1066 size_t read_stack_size; 1067 size_t read_guard_size; 1068 GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size); 1069 1070 tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base); 1071 tlsPtr_.stack_size = read_stack_size; 1072 1073 // The minimum stack size we can cope with is the overflow reserved bytes (typically 1074 // 8K) + the protected region size (4K) + another page (4K). Typically this will 1075 // be 8+4+4 = 16K. The thread won't be able to do much with this stack even the GC takes 1076 // between 8K and 12K. 1077 uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize 1078 + 4 * KB; 1079 if (read_stack_size <= min_stack) { 1080 // Note, as we know the stack is small, avoid operations that could use a lot of stack. 1081 LogHelper::LogLineLowStack(__PRETTY_FUNCTION__, 1082 __LINE__, 1083 ::android::base::ERROR, 1084 "Attempt to attach a thread with a too-small stack"); 1085 return false; 1086 } 1087 1088 // This is included in the SIGQUIT output, but it's useful here for thread debugging. 1089 VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)", 1090 read_stack_base, 1091 PrettySize(read_stack_size).c_str(), 1092 PrettySize(read_guard_size).c_str()); 1093 1094 // Set stack_end_ to the bottom of the stack saving space of stack overflows 1095 1096 Runtime* runtime = Runtime::Current(); 1097 bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler(); 1098 1099 // Valgrind on arm doesn't give the right values here. Do not install the guard page, and 1100 // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting 1101 // stack_begin to 0. 
1102 const bool valgrind_on_arm = 1103 (kRuntimeISA == kArm || kRuntimeISA == kArm64) && 1104 kMemoryToolIsValgrind && 1105 RUNNING_ON_MEMORY_TOOL != 0; 1106 if (valgrind_on_arm) { 1107 tlsPtr_.stack_begin = nullptr; 1108 } 1109 1110 ResetDefaultStackEnd(); 1111 1112 // Install the protected region if we are doing implicit overflow checks. 1113 if (implicit_stack_check && !valgrind_on_arm) { 1114 // The thread might have protected region at the bottom. We need 1115 // to install our own region so we need to move the limits 1116 // of the stack to make room for it. 1117 1118 tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize; 1119 tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize; 1120 tlsPtr_.stack_size -= read_guard_size; 1121 1122 InstallImplicitProtection(); 1123 } 1124 1125 // Sanity check. 1126 CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end)); 1127 1128 return true; 1129 } 1130 1131 void Thread::ShortDump(std::ostream& os) const { 1132 os << "Thread["; 1133 if (GetThreadId() != 0) { 1134 // If we're in kStarting, we won't have a thin lock id or tid yet. 1135 os << GetThreadId() 1136 << ",tid=" << GetTid() << ','; 1137 } 1138 os << GetState() 1139 << ",Thread*=" << this 1140 << ",peer=" << tlsPtr_.opeer 1141 << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\"" 1142 << "]"; 1143 } 1144 1145 void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map, 1146 bool force_dump_stack) const { 1147 DumpState(os); 1148 DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack); 1149 } 1150 1151 mirror::String* Thread::GetThreadName() const { 1152 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); 1153 if (tlsPtr_.opeer == nullptr) { 1154 return nullptr; 1155 } 1156 ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer); 1157 return name == nullptr ? nullptr : name->AsString(); 1158 } 1159 1160 void Thread::GetThreadName(std::string& name) const { 1161 name.assign(*tlsPtr_.name); 1162 } 1163 1164 uint64_t Thread::GetCpuMicroTime() const { 1165 #if defined(__linux__) 1166 clockid_t cpu_clock_id; 1167 pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id); 1168 timespec now; 1169 clock_gettime(cpu_clock_id, &now); 1170 return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000); 1171 #else // __APPLE__ 1172 UNIMPLEMENTED(WARNING); 1173 return -1; 1174 #endif 1175 } 1176 1177 // Attempt to rectify locks so that we dump thread list with required locks before exiting. 
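// (Best effort only: the TryLock calls below may fail, in which case we dump anyway and log a
//  warning about the lock we could not take.)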
1178 static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 1179 LOG(ERROR) << *thread << " suspend count already zero."; 1180 Locks::thread_suspend_count_lock_->Unlock(self); 1181 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 1182 Locks::mutator_lock_->SharedTryLock(self); 1183 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 1184 LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; 1185 } 1186 } 1187 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 1188 Locks::thread_list_lock_->TryLock(self); 1189 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 1190 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; 1191 } 1192 } 1193 std::ostringstream ss; 1194 Runtime::Current()->GetThreadList()->Dump(ss); 1195 LOG(FATAL) << ss.str(); 1196 } 1197 1198 bool Thread::ModifySuspendCountInternal(Thread* self, 1199 int delta, 1200 AtomicInteger* suspend_barrier, 1201 SuspendReason reason) { 1202 if (kIsDebugBuild) { 1203 DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count) 1204 << reason << " " << delta << " " << tls32_.debug_suspend_count << " " << this; 1205 DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this; 1206 Locks::thread_suspend_count_lock_->AssertHeld(self); 1207 if (this != self && !IsSuspended()) { 1208 Locks::thread_list_lock_->AssertHeld(self); 1209 } 1210 } 1211 // User code suspensions need to be checked more closely since they originate from code outside of 1212 // the runtime's control. 1213 if (UNLIKELY(reason == SuspendReason::kForUserCode)) { 1214 Locks::user_code_suspension_lock_->AssertHeld(self); 1215 if (UNLIKELY(delta + tls32_.user_code_suspend_count < 0)) { 1216 LOG(ERROR) << "attempting to modify suspend count in an illegal way."; 1217 return false; 1218 } 1219 } 1220 if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) { 1221 UnsafeLogFatalForSuspendCount(self, this); 1222 return false; 1223 } 1224 1225 if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) { 1226 // Force retry of a suspend request if it's in the middle of a thread flip to avoid a 1227 // deadlock. b/31683379. 1228 return false; 1229 } 1230 1231 uint16_t flags = kSuspendRequest; 1232 if (delta > 0 && suspend_barrier != nullptr) { 1233 uint32_t available_barrier = kMaxSuspendBarriers; 1234 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1235 if (tlsPtr_.active_suspend_barriers[i] == nullptr) { 1236 available_barrier = i; 1237 break; 1238 } 1239 } 1240 if (available_barrier == kMaxSuspendBarriers) { 1241 // No barrier spaces available, we can't add another. 1242 return false; 1243 } 1244 tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier; 1245 flags |= kActiveSuspendBarrier; 1246 } 1247 1248 tls32_.suspend_count += delta; 1249 switch (reason) { 1250 case SuspendReason::kForDebugger: 1251 tls32_.debug_suspend_count += delta; 1252 break; 1253 case SuspendReason::kForUserCode: 1254 tls32_.user_code_suspend_count += delta; 1255 break; 1256 case SuspendReason::kInternal: 1257 break; 1258 } 1259 1260 if (tls32_.suspend_count == 0) { 1261 AtomicClearFlag(kSuspendRequest); 1262 } else { 1263 // Two bits might be set simultaneously. 1264 tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags); 1265 TriggerSuspend(); 1266 } 1267 return true; 1268 } 1269 1270 bool Thread::PassActiveSuspendBarriers(Thread* self) { 1271 // Grab the suspend_count lock and copy the current set of 1272 // barriers. 
Then clear the list and the flag. The ModifySuspendCount 1273 // function requires the lock so we prevent a race between setting 1274 // the kActiveSuspendBarrier flag and clearing it. 1275 AtomicInteger* pass_barriers[kMaxSuspendBarriers]; 1276 { 1277 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1278 if (!ReadFlag(kActiveSuspendBarrier)) { 1279 // quick exit test: the barriers have already been claimed - this is 1280 // possible as there may be a race to claim and it doesn't matter 1281 // who wins. 1282 // All of the callers of this function (except the SuspendAllInternal) 1283 // will first test the kActiveSuspendBarrier flag without lock. Here 1284 // double-check whether the barrier has been passed with the 1285 // suspend_count lock. 1286 return false; 1287 } 1288 1289 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1290 pass_barriers[i] = tlsPtr_.active_suspend_barriers[i]; 1291 tlsPtr_.active_suspend_barriers[i] = nullptr; 1292 } 1293 AtomicClearFlag(kActiveSuspendBarrier); 1294 } 1295 1296 uint32_t barrier_count = 0; 1297 for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) { 1298 AtomicInteger* pending_threads = pass_barriers[i]; 1299 if (pending_threads != nullptr) { 1300 bool done = false; 1301 do { 1302 int32_t cur_val = pending_threads->LoadRelaxed(); 1303 CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val; 1304 // Reduce value by 1. 1305 done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1); 1306 #if ART_USE_FUTEXES 1307 if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously. 1308 futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0); 1309 } 1310 #endif 1311 } while (!done); 1312 ++barrier_count; 1313 } 1314 } 1315 CHECK_GT(barrier_count, 0U); 1316 return true; 1317 } 1318 1319 void Thread::ClearSuspendBarrier(AtomicInteger* target) { 1320 CHECK(ReadFlag(kActiveSuspendBarrier)); 1321 bool clear_flag = true; 1322 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1323 AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i]; 1324 if (ptr == target) { 1325 tlsPtr_.active_suspend_barriers[i] = nullptr; 1326 } else if (ptr != nullptr) { 1327 clear_flag = false; 1328 } 1329 } 1330 if (LIKELY(clear_flag)) { 1331 AtomicClearFlag(kActiveSuspendBarrier); 1332 } 1333 } 1334 1335 void Thread::RunCheckpointFunction() { 1336 bool done = false; 1337 do { 1338 // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is 1339 // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock 1340 // to prevent a race between setting the kCheckpointRequest flag and clearing it. 1341 Closure* checkpoint = nullptr; 1342 { 1343 MutexLock mu(this, *Locks::thread_suspend_count_lock_); 1344 if (tlsPtr_.checkpoint_function != nullptr) { 1345 checkpoint = tlsPtr_.checkpoint_function; 1346 if (!checkpoint_overflow_.empty()) { 1347 // Overflow list not empty, copy the first one out and continue. 1348 tlsPtr_.checkpoint_function = checkpoint_overflow_.front(); 1349 checkpoint_overflow_.pop_front(); 1350 } else { 1351 // No overflow checkpoints, this means that we are on the last pending checkpoint. 1352 tlsPtr_.checkpoint_function = nullptr; 1353 AtomicClearFlag(kCheckpointRequest); 1354 done = true; 1355 } 1356 } else { 1357 LOG(FATAL) << "Checkpoint flag set without pending checkpoint"; 1358 } 1359 } 1360 1361 // Outside the lock, run the checkpoint functions that we collected. 
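    // (One checkpoint per loop iteration; entries moved out of checkpoint_overflow_ are run
    //  on subsequent passes.)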
1362 ScopedTrace trace("Run checkpoint function"); 1363 DCHECK(checkpoint != nullptr); 1364 checkpoint->Run(this); 1365 } while (!done); 1366 } 1367 1368 void Thread::RunEmptyCheckpoint() { 1369 DCHECK_EQ(Thread::Current(), this); 1370 AtomicClearFlag(kEmptyCheckpointRequest); 1371 Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this); 1372 } 1373 1374 bool Thread::RequestCheckpoint(Closure* function) { 1375 union StateAndFlags old_state_and_flags; 1376 old_state_and_flags.as_int = tls32_.state_and_flags.as_int; 1377 if (old_state_and_flags.as_struct.state != kRunnable) { 1378 return false; // Fail, thread is suspended and so can't run a checkpoint. 1379 } 1380 1381 // We must be runnable to request a checkpoint. 1382 DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable); 1383 union StateAndFlags new_state_and_flags; 1384 new_state_and_flags.as_int = old_state_and_flags.as_int; 1385 new_state_and_flags.as_struct.flags |= kCheckpointRequest; 1386 bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent( 1387 old_state_and_flags.as_int, new_state_and_flags.as_int); 1388 if (success) { 1389 // Succeeded setting checkpoint flag, now insert the actual checkpoint. 1390 if (tlsPtr_.checkpoint_function == nullptr) { 1391 tlsPtr_.checkpoint_function = function; 1392 } else { 1393 checkpoint_overflow_.push_back(function); 1394 } 1395 CHECK_EQ(ReadFlag(kCheckpointRequest), true); 1396 TriggerSuspend(); 1397 } 1398 return success; 1399 } 1400 1401 bool Thread::RequestEmptyCheckpoint() { 1402 union StateAndFlags old_state_and_flags; 1403 old_state_and_flags.as_int = tls32_.state_and_flags.as_int; 1404 if (old_state_and_flags.as_struct.state != kRunnable) { 1405 // If it's not runnable, we don't need to do anything because it won't be in the middle of a 1406 // heap access (eg. the read barrier). 1407 return false; 1408 } 1409 1410 // We must be runnable to request a checkpoint. 1411 DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable); 1412 union StateAndFlags new_state_and_flags; 1413 new_state_and_flags.as_int = old_state_and_flags.as_int; 1414 new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest; 1415 bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent( 1416 old_state_and_flags.as_int, new_state_and_flags.as_int); 1417 if (success) { 1418 TriggerSuspend(); 1419 } 1420 return success; 1421 } 1422 1423 class BarrierClosure : public Closure { 1424 public: 1425 explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {} 1426 1427 void Run(Thread* self) OVERRIDE { 1428 wrapped_->Run(self); 1429 barrier_.Pass(self); 1430 } 1431 1432 void Wait(Thread* self) { 1433 barrier_.Increment(self, 1); 1434 } 1435 1436 private: 1437 Closure* wrapped_; 1438 Barrier barrier_; 1439 }; 1440 1441 bool Thread::RequestSynchronousCheckpoint(Closure* function) { 1442 if (this == Thread::Current()) { 1443 // Asked to run on this thread. Just run. 1444 function->Run(this); 1445 return true; 1446 } 1447 Thread* self = Thread::Current(); 1448 1449 // The current thread is not this thread. 1450 1451 if (GetState() == ThreadState::kTerminated) { 1452 return false; 1453 } 1454 1455 // Note: we're holding the thread-list lock. The thread cannot die at this point. 
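  // The RAII helper below drops thread_list_lock_ for a scope and re-acquires it on
  // destruction, so we never wait on the checkpoint barrier or on the target's suspension
  // while holding that lock.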
1456 struct ScopedThreadListLockUnlock { 1457 explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_) 1458 : self_thread(self_in) { 1459 Locks::thread_list_lock_->AssertHeld(self_thread); 1460 Locks::thread_list_lock_->Unlock(self_thread); 1461 } 1462 1463 ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) { 1464 Locks::thread_list_lock_->AssertNotHeld(self_thread); 1465 Locks::thread_list_lock_->Lock(self_thread); 1466 } 1467 1468 Thread* self_thread; 1469 }; 1470 1471 for (;;) { 1472 // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the 1473 // suspend-count lock for too long. 1474 if (GetState() == ThreadState::kRunnable) { 1475 BarrierClosure barrier_closure(function); 1476 bool installed = false; 1477 { 1478 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1479 installed = RequestCheckpoint(&barrier_closure); 1480 } 1481 if (installed) { 1482 // Relinquish the thread-list lock, temporarily. We should not wait holding any locks. 1483 ScopedThreadListLockUnlock stllu(self); 1484 ScopedThreadSuspension sts(self, ThreadState::kWaiting); 1485 barrier_closure.Wait(self); 1486 return true; 1487 } 1488 // Fall-through. 1489 } 1490 1491 // This thread is not runnable, make sure we stay suspended, then run the checkpoint. 1492 // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in 1493 // certain situations. 1494 { 1495 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1496 1497 if (!ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal)) { 1498 // Just retry the loop. 1499 sched_yield(); 1500 continue; 1501 } 1502 } 1503 1504 { 1505 ScopedThreadListLockUnlock stllu(self); 1506 { 1507 ScopedThreadSuspension sts(self, ThreadState::kWaiting); 1508 while (GetState() == ThreadState::kRunnable) { 1509 // We became runnable again. Wait till the suspend triggered in ModifySuspendCount 1510 // moves us to suspended. 1511 sched_yield(); 1512 } 1513 } 1514 1515 function->Run(this); 1516 } 1517 1518 { 1519 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1520 1521 DCHECK_NE(GetState(), ThreadState::kRunnable); 1522 bool updated = ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal); 1523 DCHECK(updated); 1524 } 1525 1526 { 1527 // Imitate ResumeAll, the thread may be waiting on Thread::resume_cond_ since we raised its 1528 // suspend count. Now the suspend_count_ is lowered so we must do the broadcast. 1529 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1530 Thread::resume_cond_->Broadcast(self); 1531 } 1532 1533 return true; // We're done, break out of the loop. 1534 } 1535 } 1536 1537 Closure* Thread::GetFlipFunction() { 1538 Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); 1539 Closure* func; 1540 do { 1541 func = atomic_func->LoadRelaxed(); 1542 if (func == nullptr) { 1543 return nullptr; 1544 } 1545 } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr)); 1546 DCHECK(func != nullptr); 1547 return func; 1548 } 1549 1550 void Thread::SetFlipFunction(Closure* function) { 1551 CHECK(function != nullptr); 1552 Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); 1553 atomic_func->StoreSequentiallyConsistent(function); 1554 } 1555 1556 void Thread::FullSuspendCheck() { 1557 ScopedTrace trace(__FUNCTION__); 1558 VLOG(threads) << this << " self-suspending"; 1559 // Make thread appear suspended to other threads, release mutator_lock_. 
1560 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_. 1561 ScopedThreadSuspension(this, kSuspended); 1562 VLOG(threads) << this << " self-reviving"; 1563 } 1564 1565 static std::string GetSchedulerGroupName(pid_t tid) { 1566 // /proc/<pid>/cgroup looks like this: 1567 // 2:devices:/ 1568 // 1:cpuacct,cpu:/ 1569 // We want the third field from the line whose second field contains the "cpu" token. 1570 std::string cgroup_file; 1571 if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) { 1572 return ""; 1573 } 1574 std::vector<std::string> cgroup_lines; 1575 Split(cgroup_file, '\n', &cgroup_lines); 1576 for (size_t i = 0; i < cgroup_lines.size(); ++i) { 1577 std::vector<std::string> cgroup_fields; 1578 Split(cgroup_lines[i], ':', &cgroup_fields); 1579 std::vector<std::string> cgroups; 1580 Split(cgroup_fields[1], ',', &cgroups); 1581 for (size_t j = 0; j < cgroups.size(); ++j) { 1582 if (cgroups[j] == "cpu") { 1583 return cgroup_fields[2].substr(1); // Skip the leading slash. 1584 } 1585 } 1586 } 1587 return ""; 1588 } 1589 1590 1591 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { 1592 std::string group_name; 1593 int priority; 1594 bool is_daemon = false; 1595 Thread* self = Thread::Current(); 1596 1597 // If flip_function is not null, it means we have run a checkpoint 1598 // before the thread wakes up to execute the flip function and the 1599 // thread roots haven't been forwarded. So the following access to 1600 // the roots (opeer or methods in the frames) would be bad. Run it 1601 // here. TODO: clean up. 1602 if (thread != nullptr) { 1603 ScopedObjectAccessUnchecked soa(self); 1604 Thread* this_thread = const_cast<Thread*>(thread); 1605 Closure* flip_func = this_thread->GetFlipFunction(); 1606 if (flip_func != nullptr) { 1607 flip_func->Run(this_thread); 1608 } 1609 } 1610 1611 // Don't do this if we are aborting since the GC may have all the threads suspended. This will 1612 // cause ScopedObjectAccessUnchecked to deadlock. 1613 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) { 1614 ScopedObjectAccessUnchecked soa(self); 1615 priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority) 1616 ->GetInt(thread->tlsPtr_.opeer); 1617 is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon) 1618 ->GetBoolean(thread->tlsPtr_.opeer); 1619 1620 ObjPtr<mirror::Object> thread_group = 1621 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 1622 ->GetObject(thread->tlsPtr_.opeer); 1623 1624 if (thread_group != nullptr) { 1625 ArtField* group_name_field = 1626 jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); 1627 ObjPtr<mirror::String> group_name_string = 1628 group_name_field->GetObject(thread_group)->AsString(); 1629 group_name = (group_name_string != nullptr) ? 
group_name_string->ToModifiedUtf8() : "<null>"; 1630 } 1631 } else { 1632 priority = GetNativePriority(); 1633 } 1634 1635 std::string scheduler_group_name(GetSchedulerGroupName(tid)); 1636 if (scheduler_group_name.empty()) { 1637 scheduler_group_name = "default"; 1638 } 1639 1640 if (thread != nullptr) { 1641 os << '"' << *thread->tlsPtr_.name << '"'; 1642 if (is_daemon) { 1643 os << " daemon"; 1644 } 1645 os << " prio=" << priority 1646 << " tid=" << thread->GetThreadId() 1647 << " " << thread->GetState(); 1648 if (thread->IsStillStarting()) { 1649 os << " (still starting up)"; 1650 } 1651 os << "\n"; 1652 } else { 1653 os << '"' << ::art::GetThreadName(tid) << '"' 1654 << " prio=" << priority 1655 << " (not attached)\n"; 1656 } 1657 1658 if (thread != nullptr) { 1659 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1660 os << " | group=\"" << group_name << "\"" 1661 << " sCount=" << thread->tls32_.suspend_count 1662 << " dsCount=" << thread->tls32_.debug_suspend_count 1663 << " flags=" << thread->tls32_.state_and_flags.as_struct.flags 1664 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer) 1665 << " self=" << reinterpret_cast<const void*>(thread) << "\n"; 1666 } 1667 1668 os << " | sysTid=" << tid 1669 << " nice=" << getpriority(PRIO_PROCESS, tid) 1670 << " cgrp=" << scheduler_group_name; 1671 if (thread != nullptr) { 1672 int policy; 1673 sched_param sp; 1674 #if !defined(__APPLE__) 1675 // b/36445592 Don't use pthread_getschedparam since pthread may have exited. 1676 policy = sched_getscheduler(tid); 1677 if (policy == -1) { 1678 PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; 1679 } 1680 int sched_getparam_result = sched_getparam(tid, &sp); 1681 if (sched_getparam_result == -1) { 1682 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; 1683 sp.sched_priority = -1; 1684 } 1685 #else 1686 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), 1687 __FUNCTION__); 1688 #endif 1689 os << " sched=" << policy << "/" << sp.sched_priority 1690 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self); 1691 } 1692 os << "\n"; 1693 1694 // Grab the scheduler stats for this thread. 1695 std::string scheduler_stats; 1696 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) { 1697 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 1698 } else { 1699 scheduler_stats = "0 0 0"; 1700 } 1701 1702 char native_thread_state = '?'; 1703 int utime = 0; 1704 int stime = 0; 1705 int task_cpu = 0; 1706 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu); 1707 1708 os << " | state=" << native_thread_state 1709 << " schedstat=( " << scheduler_stats << " )" 1710 << " utm=" << utime 1711 << " stm=" << stime 1712 << " core=" << task_cpu 1713 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 1714 if (thread != nullptr) { 1715 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-" 1716 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize=" 1717 << PrettySize(thread->tlsPtr_.stack_size) << "\n"; 1718 // Dump the held mutexes. 
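// (The kMonitorLock level is skipped below; Java object monitors are reported separately with
// the managed stack dump.)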
1719 os << " | held mutexes="; 1720 for (size_t i = 0; i < kLockLevelCount; ++i) { 1721 if (i != kMonitorLock) { 1722 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i)); 1723 if (mutex != nullptr) { 1724 os << " \"" << mutex->GetName() << "\""; 1725 if (mutex->IsReaderWriterMutex()) { 1726 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex); 1727 if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) { 1728 os << "(exclusive held)"; 1729 } else { 1730 os << "(shared held)"; 1731 } 1732 } 1733 } 1734 } 1735 } 1736 os << "\n"; 1737 } 1738 } 1739 1740 void Thread::DumpState(std::ostream& os) const { 1741 Thread::DumpState(os, this, GetTid()); 1742 } 1743 1744 struct StackDumpVisitor : public StackVisitor { 1745 StackDumpVisitor(std::ostream& os_in, 1746 Thread* thread_in, 1747 Context* context, 1748 bool can_allocate_in, 1749 bool check_suspended = true, 1750 bool dump_locks_in = true) 1751 REQUIRES_SHARED(Locks::mutator_lock_) 1752 : StackVisitor(thread_in, 1753 context, 1754 StackVisitor::StackWalkKind::kIncludeInlinedFrames, 1755 check_suspended), 1756 os(os_in), 1757 can_allocate(can_allocate_in), 1758 last_method(nullptr), 1759 last_line_number(0), 1760 repetition_count(0), 1761 frame_count(0), 1762 dump_locks(dump_locks_in) {} 1763 1764 virtual ~StackDumpVisitor() { 1765 if (frame_count == 0) { 1766 os << " (no managed stack frames)\n"; 1767 } 1768 } 1769 1770 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 1771 ArtMethod* m = GetMethod(); 1772 if (m->IsRuntimeMethod()) { 1773 return true; 1774 } 1775 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1776 const int kMaxRepetition = 3; 1777 ObjPtr<mirror::Class> c = m->GetDeclaringClass(); 1778 ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache(); 1779 int line_number = -1; 1780 if (dex_cache != nullptr) { // be tolerant of bad input 1781 const DexFile* dex_file = dex_cache->GetDexFile(); 1782 line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false)); 1783 } 1784 if (line_number == last_line_number && last_method == m) { 1785 ++repetition_count; 1786 } else { 1787 if (repetition_count >= kMaxRepetition) { 1788 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 1789 } 1790 repetition_count = 0; 1791 last_line_number = line_number; 1792 last_method = m; 1793 } 1794 if (repetition_count < kMaxRepetition) { 1795 os << " at " << m->PrettyMethod(false); 1796 if (m->IsNative()) { 1797 os << "(Native method)"; 1798 } else { 1799 const char* source_file(m->GetDeclaringClassSourceFile()); 1800 os << "(" << (source_file != nullptr ? source_file : "unavailable") 1801 << ":" << line_number << ")"; 1802 } 1803 os << "\n"; 1804 if (frame_count == 0) { 1805 Monitor::DescribeWait(os, GetThread()); 1806 } 1807 if (can_allocate && dump_locks) { 1808 // Visit locks, but do not abort on errors. This would trigger a nested abort. 1809 // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in 1810 // RegTypeCache::RegTypeCache due to thread_list_lock. 
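// DumpLockedObject (defined below) receives each locked object; passing 'false' keeps
// VisitLocks from aborting on errors, per the comment above.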
1811 Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1812 }
1813 }
1814
1815 ++frame_count;
1816 return true;
1817 }
1818
1819 static void DumpLockedObject(mirror::Object* o, void* context)
1820 REQUIRES_SHARED(Locks::mutator_lock_) {
1821 std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1822 os << " - locked ";
1823 if (o == nullptr) {
1824 os << "an unknown object";
1825 } else {
1826 if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
1827 // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
1828 // may not have been flipped yet and "o" may be a from-space (stale) ref, in which case the
1829 // IdentityHashCode call below will crash. So explicitly mark/forward it here.
1830 o = ReadBarrier::Mark(o);
1831 }
1832 if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1833 Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1834 // Getting the identity hashcode here would result in lock inflation and suspension of the
1835 // current thread, which isn't safe if this is the only runnable thread.
1836 os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1837 o->PrettyTypeOf().c_str());
1838 } else {
1839 // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1840 // we get the pretty type before we call IdentityHashCode.
1841 const std::string pretty_type(o->PrettyTypeOf());
1842 os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1843 }
1844 }
1845 os << "\n";
1846 }
1847
1848 std::ostream& os;
1849 const bool can_allocate;
1850 ArtMethod* last_method;
1851 int last_line_number;
1852 int repetition_count;
1853 int frame_count;
1854 const bool dump_locks;
1855 };
1856
1857 static bool ShouldShowNativeStack(const Thread* thread)
1858 REQUIRES_SHARED(Locks::mutator_lock_) {
1859 ThreadState state = thread->GetState();
1860
1861 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1862 if (state > kWaiting && state < kStarting) {
1863 return true;
1864 }
1865
1866 // In an Object.wait variant or Thread.sleep? That's not interesting.
1867 if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1868 return false;
1869 }
1870
1871 // Threads with no managed stack frames should be shown.
1872 const ManagedStack* managed_stack = thread->GetManagedStack();
1873 if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1874 managed_stack->GetTopShadowFrame() == nullptr)) {
1875 return true;
1876 }
1877
1878 // In some other native method? That's interesting.
1879 // We don't just check kNative because native methods will be in state kSuspended if they're
1880 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1881 // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1882 ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1883 return current_method != nullptr && current_method->IsNative();
1884 }
1885
1886 void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
1887 // If flip_function is not null, it means we have run a checkpoint
1888 // before the thread wakes up to execute the flip function and the
1889 // thread roots haven't been forwarded. So the following access to
1890 // the roots (locks or methods in the frames) would be bad. Run it
1891 // here. TODO: clean up.
1892 { 1893 Thread* this_thread = const_cast<Thread*>(this); 1894 Closure* flip_func = this_thread->GetFlipFunction(); 1895 if (flip_func != nullptr) { 1896 flip_func->Run(this_thread); 1897 } 1898 } 1899 1900 // Dumping the Java stack involves the verifier for locks. The verifier operates under the 1901 // assumption that there is no exception pending on entry. Thus, stash any pending exception. 1902 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended 1903 // thread. 1904 StackHandleScope<1> scope(Thread::Current()); 1905 Handle<mirror::Throwable> exc; 1906 bool have_exception = false; 1907 if (IsExceptionPending()) { 1908 exc = scope.NewHandle(GetException()); 1909 const_cast<Thread*>(this)->ClearException(); 1910 have_exception = true; 1911 } 1912 1913 std::unique_ptr<Context> context(Context::Create()); 1914 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), 1915 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks); 1916 dumper.WalkStack(); 1917 1918 if (have_exception) { 1919 const_cast<Thread*>(this)->SetException(exc.Get()); 1920 } 1921 } 1922 1923 void Thread::DumpStack(std::ostream& os, 1924 bool dump_native_stack, 1925 BacktraceMap* backtrace_map, 1926 bool force_dump_stack) const { 1927 // TODO: we call this code when dying but may not have suspended the thread ourself. The 1928 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit 1929 // the race with the thread_suspend_count_lock_). 1930 bool dump_for_abort = (gAborting > 0); 1931 bool safe_to_dump = (this == Thread::Current() || IsSuspended()); 1932 if (!kIsDebugBuild) { 1933 // We always want to dump the stack for an abort, however, there is no point dumping another 1934 // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. 1935 safe_to_dump = (safe_to_dump || dump_for_abort); 1936 } 1937 if (safe_to_dump || force_dump_stack) { 1938 // If we're currently in native code, dump that stack before dumping the managed stack. 1939 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { 1940 DumpKernelStack(os, GetTid(), " kernel: ", false); 1941 ArtMethod* method = 1942 GetCurrentMethod(nullptr, 1943 /*check_suspended*/ !force_dump_stack, 1944 /*abort_on_error*/ !(dump_for_abort || force_dump_stack)); 1945 DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); 1946 } 1947 DumpJavaStack(os, 1948 /*check_suspended*/ !force_dump_stack, 1949 /*dump_locks*/ !force_dump_stack); 1950 } else { 1951 os << "Not able to dump stack of thread that isn't suspended"; 1952 } 1953 } 1954 1955 void Thread::ThreadExitCallback(void* arg) { 1956 Thread* self = reinterpret_cast<Thread*>(arg); 1957 if (self->tls32_.thread_exit_check_count == 0) { 1958 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " 1959 "going to use a pthread_key_create destructor?): " << *self; 1960 CHECK(is_started_); 1961 #ifdef ART_TARGET_ANDROID 1962 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; 1963 #else 1964 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); 1965 #endif 1966 self->tls32_.thread_exit_check_count = 1; 1967 } else { 1968 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 1969 } 1970 } 1971 1972 void Thread::Startup() { 1973 CHECK(!is_started_); 1974 is_started_ = true; 1975 { 1976 // MutexLock to keep annotalysis happy. 
1977 // 1978 // Note we use null for the thread because Thread::Current can 1979 // return garbage since (is_started_ == true) and 1980 // Thread::pthread_key_self_ is not yet initialized. 1981 // This was seen on glibc. 1982 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_); 1983 resume_cond_ = new ConditionVariable("Thread resumption condition variable", 1984 *Locks::thread_suspend_count_lock_); 1985 } 1986 1987 // Allocate a TLS slot. 1988 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), 1989 "self key"); 1990 1991 // Double-check the TLS slot allocation. 1992 if (pthread_getspecific(pthread_key_self_) != nullptr) { 1993 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr"; 1994 } 1995 } 1996 1997 void Thread::FinishStartup() { 1998 Runtime* runtime = Runtime::Current(); 1999 CHECK(runtime->IsStarted()); 2000 2001 // Finish attaching the main thread. 2002 ScopedObjectAccess soa(Thread::Current()); 2003 Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); 2004 Thread::Current()->AssertNoPendingException(); 2005 2006 Runtime::Current()->GetClassLinker()->RunRootClinits(); 2007 2008 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular 2009 // threads, this is done in Thread.start() on the Java side. 2010 { 2011 // This is only ever done once. There's no benefit in caching the method. 2012 jmethodID thread_group_add = soa.Env()->GetMethodID(WellKnownClasses::java_lang_ThreadGroup, 2013 "add", 2014 "(Ljava/lang/Thread;)V"); 2015 CHECK(thread_group_add != nullptr); 2016 ScopedLocalRef<jobject> thread_jobject( 2017 soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer())); 2018 soa.Env()->CallNonvirtualVoidMethod(runtime->GetMainThreadGroup(), 2019 WellKnownClasses::java_lang_ThreadGroup, 2020 thread_group_add, 2021 thread_jobject.get()); 2022 Thread::Current()->AssertNoPendingException(); 2023 } 2024 } 2025 2026 void Thread::Shutdown() { 2027 CHECK(is_started_); 2028 is_started_ = false; 2029 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 2030 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 2031 if (resume_cond_ != nullptr) { 2032 delete resume_cond_; 2033 resume_cond_ = nullptr; 2034 } 2035 } 2036 2037 Thread::Thread(bool daemon) 2038 : tls32_(daemon), 2039 wait_monitor_(nullptr), 2040 custom_tls_(nullptr), 2041 can_call_into_java_(true) { 2042 wait_mutex_ = new Mutex("a thread wait mutex"); 2043 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_); 2044 tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>; 2045 tlsPtr_.name = new std::string(kThreadNameDuringStartup); 2046 2047 static_assert((sizeof(Thread) % 4) == 0U, 2048 "art::Thread has a size which is not a multiple of 4."); 2049 tls32_.state_and_flags.as_struct.flags = 0; 2050 tls32_.state_and_flags.as_struct.state = kNative; 2051 tls32_.interrupted.StoreRelaxed(false); 2052 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); 2053 std::fill(tlsPtr_.rosalloc_runs, 2054 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, 2055 gc::allocator::RosAlloc::GetDedicatedFullRun()); 2056 tlsPtr_.checkpoint_function = nullptr; 2057 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 2058 tlsPtr_.active_suspend_barriers[i] = nullptr; 2059 } 2060 tlsPtr_.flip_function = nullptr; 2061 tlsPtr_.thread_local_mark_stack = nullptr; 2062 
tls32_.is_transitioning_to_runnable = false; 2063 } 2064 2065 bool Thread::IsStillStarting() const { 2066 // You might think you can check whether the state is kStarting, but for much of thread startup, 2067 // the thread is in kNative; it might also be in kVmWait. 2068 // You might think you can check whether the peer is null, but the peer is actually created and 2069 // assigned fairly early on, and needs to be. 2070 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 2071 // this thread _ever_ entered kRunnable". 2072 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) || 2073 (*tlsPtr_.name == kThreadNameDuringStartup); 2074 } 2075 2076 void Thread::AssertPendingException() const { 2077 CHECK(IsExceptionPending()) << "Pending exception expected."; 2078 } 2079 2080 void Thread::AssertPendingOOMException() const { 2081 AssertPendingException(); 2082 auto* e = GetException(); 2083 CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass()) 2084 << e->Dump(); 2085 } 2086 2087 void Thread::AssertNoPendingException() const { 2088 if (UNLIKELY(IsExceptionPending())) { 2089 ScopedObjectAccess soa(Thread::Current()); 2090 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); 2091 } 2092 } 2093 2094 void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { 2095 if (UNLIKELY(IsExceptionPending())) { 2096 ScopedObjectAccess soa(Thread::Current()); 2097 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " 2098 << GetException()->Dump(); 2099 } 2100 } 2101 2102 class MonitorExitVisitor : public SingleRootVisitor { 2103 public: 2104 explicit MonitorExitVisitor(Thread* self) : self_(self) { } 2105 2106 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit. 2107 void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED) 2108 OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 2109 if (self_->HoldsLock(entered_monitor)) { 2110 LOG(WARNING) << "Calling MonitorExit on object " 2111 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")" 2112 << " left locked by native thread " 2113 << *Thread::Current() << " which is detaching"; 2114 entered_monitor->MonitorExit(self_); 2115 } 2116 } 2117 2118 private: 2119 Thread* const self_; 2120 }; 2121 2122 void Thread::Destroy() { 2123 Thread* self = this; 2124 DCHECK_EQ(self, Thread::Current()); 2125 2126 if (tlsPtr_.jni_env != nullptr) { 2127 { 2128 ScopedObjectAccess soa(self); 2129 MonitorExitVisitor visitor(self); 2130 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 2131 tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal)); 2132 } 2133 // Release locally held global references which releasing may require the mutator lock. 2134 if (tlsPtr_.jpeer != nullptr) { 2135 // If pthread_create fails we don't have a jni env here. 2136 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer); 2137 tlsPtr_.jpeer = nullptr; 2138 } 2139 if (tlsPtr_.class_loader_override != nullptr) { 2140 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override); 2141 tlsPtr_.class_loader_override = nullptr; 2142 } 2143 } 2144 2145 if (tlsPtr_.opeer != nullptr) { 2146 ScopedObjectAccess soa(self); 2147 // We may need to call user-supplied managed code, do this before final clean-up. 
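// (Concretely: Thread.dispatchUncaughtException(...) via HandleUncaughtExceptions and
// ThreadGroup.removeThread(...) via RemoveFromThreadGroup, both defined below.)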
2148 HandleUncaughtExceptions(soa); 2149 Runtime* runtime = Runtime::Current(); 2150 if (runtime != nullptr) { 2151 runtime->GetRuntimeCallbacks()->ThreadDeath(self); 2152 } 2153 RemoveFromThreadGroup(soa); 2154 2155 // this.nativePeer = 0; 2156 if (Runtime::Current()->IsActiveTransaction()) { 2157 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2158 ->SetLong<true>(tlsPtr_.opeer, 0); 2159 } else { 2160 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2161 ->SetLong<false>(tlsPtr_.opeer, 0); 2162 } 2163 2164 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone 2165 // who is waiting. 2166 ObjPtr<mirror::Object> lock = 2167 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer); 2168 // (This conditional is only needed for tests, where Thread.lock won't have been set.) 2169 if (lock != nullptr) { 2170 StackHandleScope<1> hs(self); 2171 Handle<mirror::Object> h_obj(hs.NewHandle(lock)); 2172 ObjectLock<mirror::Object> locker(self, h_obj); 2173 locker.NotifyAll(); 2174 } 2175 tlsPtr_.opeer = nullptr; 2176 } 2177 2178 { 2179 ScopedObjectAccess soa(self); 2180 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this); 2181 if (kUseReadBarrier) { 2182 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this); 2183 } 2184 } 2185 } 2186 2187 Thread::~Thread() { 2188 CHECK(tlsPtr_.class_loader_override == nullptr); 2189 CHECK(tlsPtr_.jpeer == nullptr); 2190 CHECK(tlsPtr_.opeer == nullptr); 2191 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run? 2192 if (initialized) { 2193 delete tlsPtr_.jni_env; 2194 tlsPtr_.jni_env = nullptr; 2195 } 2196 CHECK_NE(GetState(), kRunnable); 2197 CHECK(!ReadFlag(kCheckpointRequest)); 2198 CHECK(!ReadFlag(kEmptyCheckpointRequest)); 2199 CHECK(tlsPtr_.checkpoint_function == nullptr); 2200 CHECK_EQ(checkpoint_overflow_.size(), 0u); 2201 CHECK(tlsPtr_.flip_function == nullptr); 2202 CHECK_EQ(tls32_.is_transitioning_to_runnable, false); 2203 2204 // Make sure we processed all deoptimization requests. 2205 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization"; 2206 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) << 2207 "Not all deoptimized frames have been consumed by the debugger."; 2208 2209 // We may be deleting a still born thread. 2210 SetStateUnsafe(kTerminated); 2211 2212 delete wait_cond_; 2213 delete wait_mutex_; 2214 2215 if (tlsPtr_.long_jump_context != nullptr) { 2216 delete tlsPtr_.long_jump_context; 2217 } 2218 2219 if (initialized) { 2220 CleanupCpu(); 2221 } 2222 2223 if (tlsPtr_.single_step_control != nullptr) { 2224 delete tlsPtr_.single_step_control; 2225 } 2226 delete tlsPtr_.instrumentation_stack; 2227 delete tlsPtr_.name; 2228 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample; 2229 2230 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this); 2231 2232 TearDownAlternateSignalStack(); 2233 } 2234 2235 void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) { 2236 if (!IsExceptionPending()) { 2237 return; 2238 } 2239 ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2240 ScopedThreadStateChange tsc(this, kNative); 2241 2242 // Get and clear the exception. 
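// (ExceptionOccurred returns a new local reference; ScopedLocalRef releases it once we are
// done with it.)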
2243 ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred()); 2244 tlsPtr_.jni_env->ExceptionClear(); 2245 2246 // Call the Thread instance's dispatchUncaughtException(Throwable) 2247 tlsPtr_.jni_env->CallVoidMethod(peer.get(), 2248 WellKnownClasses::java_lang_Thread_dispatchUncaughtException, 2249 exception.get()); 2250 2251 // If the dispatchUncaughtException threw, clear that exception too. 2252 tlsPtr_.jni_env->ExceptionClear(); 2253 } 2254 2255 void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) { 2256 // this.group.removeThread(this); 2257 // group can be null if we're in the compiler or a test. 2258 ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 2259 ->GetObject(tlsPtr_.opeer); 2260 if (ogroup != nullptr) { 2261 ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); 2262 ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2263 ScopedThreadStateChange tsc(soa.Self(), kNative); 2264 tlsPtr_.jni_env->CallVoidMethod(group.get(), 2265 WellKnownClasses::java_lang_ThreadGroup_removeThread, 2266 peer.get()); 2267 } 2268 } 2269 2270 bool Thread::HandleScopeContains(jobject obj) const { 2271 StackReference<mirror::Object>* hs_entry = 2272 reinterpret_cast<StackReference<mirror::Object>*>(obj); 2273 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) { 2274 if (cur->Contains(hs_entry)) { 2275 return true; 2276 } 2277 } 2278 // JNI code invoked from portable code uses shadow frames rather than the handle scope. 2279 return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry); 2280 } 2281 2282 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) { 2283 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( 2284 visitor, RootInfo(kRootNativeStack, thread_id)); 2285 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) { 2286 cur->VisitRoots(buffered_visitor); 2287 } 2288 } 2289 2290 ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const { 2291 if (obj == nullptr) { 2292 return nullptr; 2293 } 2294 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2295 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2296 ObjPtr<mirror::Object> result; 2297 bool expect_null = false; 2298 // The "kinds" below are sorted by the frequency we expect to encounter them. 2299 if (kind == kLocal) { 2300 IndirectReferenceTable& locals = tlsPtr_.jni_env->locals; 2301 // Local references do not need a read barrier. 2302 result = locals.Get<kWithoutReadBarrier>(ref); 2303 } else if (kind == kHandleScopeOrInvalid) { 2304 // TODO: make stack indirect reference table lookup more efficient. 2305 // Check if this is a local reference in the handle scope. 2306 if (LIKELY(HandleScopeContains(obj))) { 2307 // Read from handle scope. 2308 result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr(); 2309 VerifyObject(result); 2310 } else { 2311 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj); 2312 expect_null = true; 2313 result = nullptr; 2314 } 2315 } else if (kind == kGlobal) { 2316 result = tlsPtr_.jni_env->vm->DecodeGlobal(ref); 2317 } else { 2318 DCHECK_EQ(kind, kWeakGlobal); 2319 result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref); 2320 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) { 2321 // This is a special case where it's okay to return null. 
2322 expect_null = true; 2323 result = nullptr; 2324 } 2325 } 2326 2327 if (UNLIKELY(!expect_null && result == nullptr)) { 2328 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p", 2329 ToStr<IndirectRefKind>(kind).c_str(), obj); 2330 } 2331 return result; 2332 } 2333 2334 bool Thread::IsJWeakCleared(jweak obj) const { 2335 CHECK(obj != nullptr); 2336 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2337 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2338 CHECK_EQ(kind, kWeakGlobal); 2339 return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref); 2340 } 2341 2342 // Implements java.lang.Thread.interrupted. 2343 bool Thread::Interrupted() { 2344 DCHECK_EQ(Thread::Current(), this); 2345 // No other thread can concurrently reset the interrupted flag. 2346 bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent(); 2347 if (interrupted) { 2348 tls32_.interrupted.StoreSequentiallyConsistent(false); 2349 } 2350 return interrupted; 2351 } 2352 2353 // Implements java.lang.Thread.isInterrupted. 2354 bool Thread::IsInterrupted() { 2355 return tls32_.interrupted.LoadSequentiallyConsistent(); 2356 } 2357 2358 void Thread::Interrupt(Thread* self) { 2359 MutexLock mu(self, *wait_mutex_); 2360 if (tls32_.interrupted.LoadSequentiallyConsistent()) { 2361 return; 2362 } 2363 tls32_.interrupted.StoreSequentiallyConsistent(true); 2364 NotifyLocked(self); 2365 } 2366 2367 void Thread::Notify() { 2368 Thread* self = Thread::Current(); 2369 MutexLock mu(self, *wait_mutex_); 2370 NotifyLocked(self); 2371 } 2372 2373 void Thread::NotifyLocked(Thread* self) { 2374 if (wait_monitor_ != nullptr) { 2375 wait_cond_->Signal(self); 2376 } 2377 } 2378 2379 void Thread::SetClassLoaderOverride(jobject class_loader_override) { 2380 if (tlsPtr_.class_loader_override != nullptr) { 2381 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override); 2382 } 2383 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override); 2384 } 2385 2386 using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>; 2387 2388 // Counts the stack trace depth and also fetches the first max_saved_frames frames. 2389 class FetchStackTraceVisitor : public StackVisitor { 2390 public: 2391 explicit FetchStackTraceVisitor(Thread* thread, 2392 ArtMethodDexPcPair* saved_frames = nullptr, 2393 size_t max_saved_frames = 0) 2394 REQUIRES_SHARED(Locks::mutator_lock_) 2395 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2396 saved_frames_(saved_frames), 2397 max_saved_frames_(max_saved_frames) {} 2398 2399 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2400 // We want to skip frames up to and including the exception's constructor. 2401 // Note we also skip the frame if it doesn't have a method (namely the callee 2402 // save frame) 2403 ArtMethod* m = GetMethod(); 2404 if (skipping_ && !m->IsRuntimeMethod() && 2405 !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { 2406 skipping_ = false; 2407 } 2408 if (!skipping_) { 2409 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). 2410 if (depth_ < max_saved_frames_) { 2411 saved_frames_[depth_].first = m; 2412 saved_frames_[depth_].second = m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc(); 2413 } 2414 ++depth_; 2415 } 2416 } else { 2417 ++skip_depth_; 2418 } 2419 return true; 2420 } 2421 2422 uint32_t GetDepth() const { 2423 return depth_; 2424 } 2425 2426 uint32_t GetSkipDepth() const { 2427 return skip_depth_; 2428 } 2429 2430 private: 2431 uint32_t depth_ = 0; 2432 uint32_t skip_depth_ = 0; 2433 bool skipping_ = true; 2434 ArtMethodDexPcPair* saved_frames_; 2435 const size_t max_saved_frames_; 2436 2437 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); 2438 }; 2439 2440 template<bool kTransactionActive> 2441 class BuildInternalStackTraceVisitor : public StackVisitor { 2442 public: 2443 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) 2444 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2445 self_(self), 2446 skip_depth_(skip_depth), 2447 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} 2448 2449 bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { 2450 // Allocate method trace as an object array where the first element is a pointer array that 2451 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring 2452 // class of the ArtMethod pointers. 2453 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2454 StackHandleScope<1> hs(self_); 2455 ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass); 2456 // The first element is the methods and dex pc array, the other elements are declaring classes 2457 // for the methods to ensure classes in the stack trace don't get unloaded. 2458 Handle<mirror::ObjectArray<mirror::Object>> trace( 2459 hs.NewHandle( 2460 mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1))); 2461 if (trace == nullptr) { 2462 // Acquire uninterruptible_ in all paths. 2463 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2464 self_->AssertPendingOOMException(); 2465 return false; 2466 } 2467 ObjPtr<mirror::PointerArray> methods_and_pcs = 2468 class_linker->AllocPointerArray(self_, depth * 2); 2469 const char* last_no_suspend_cause = 2470 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2471 if (methods_and_pcs == nullptr) { 2472 self_->AssertPendingOOMException(); 2473 return false; 2474 } 2475 trace->Set(0, methods_and_pcs); 2476 trace_ = trace.Get(); 2477 // If We are called from native, use non-transactional mode. 2478 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; 2479 return true; 2480 } 2481 2482 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { 2483 self_->EndAssertNoThreadSuspension(nullptr); 2484 } 2485 2486 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2487 if (trace_ == nullptr) { 2488 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 2489 } 2490 if (skip_depth_ > 0) { 2491 skip_depth_--; 2492 return true; 2493 } 2494 ArtMethod* m = GetMethod(); 2495 if (m->IsRuntimeMethod()) { 2496 return true; // Ignore runtime frames (in particular callee save). 2497 } 2498 AddFrame(m, m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc()); 2499 return true; 2500 } 2501 2502 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { 2503 ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); 2504 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); 2505 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( 2506 trace_methods_and_pcs->GetLength() / 2 + count_, 2507 dex_pc, 2508 pointer_size_); 2509 // Save the declaring class of the method to ensure that the declaring classes of the methods 2510 // do not get unloaded while the stack trace is live. 2511 trace_->Set(count_ + 1, method->GetDeclaringClass()); 2512 ++count_; 2513 } 2514 2515 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { 2516 return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0))); 2517 } 2518 2519 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { 2520 return trace_; 2521 } 2522 2523 private: 2524 Thread* const self_; 2525 // How many more frames to skip. 2526 int32_t skip_depth_; 2527 // Current position down stack trace. 2528 uint32_t count_ = 0; 2529 // An object array where the first element is a pointer array that contains the ArtMethod 2530 // pointers on the stack and dex PCs. The rest of the elements are the declaring 2531 // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of 2532 // the i'th frame. 2533 mirror::ObjectArray<mirror::Object>* trace_ = nullptr; 2534 // For cross compilation. 2535 const PointerSize pointer_size_; 2536 2537 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); 2538 }; 2539 2540 template<bool kTransactionActive> 2541 jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { 2542 // Compute depth of stack, save frames if possible to avoid needing to recompute many. 2543 constexpr size_t kMaxSavedFrames = 256; 2544 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); 2545 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this), 2546 &saved_frames[0], 2547 kMaxSavedFrames); 2548 count_visitor.WalkStack(); 2549 const uint32_t depth = count_visitor.GetDepth(); 2550 const uint32_t skip_depth = count_visitor.GetSkipDepth(); 2551 2552 // Build internal stack trace. 2553 BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), 2554 const_cast<Thread*>(this), 2555 skip_depth); 2556 if (!build_trace_visitor.Init(depth)) { 2557 return nullptr; // Allocation failed. 2558 } 2559 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster 2560 // than doing the stack walk twice. 2561 if (depth < kMaxSavedFrames) { 2562 for (size_t i = 0; i < depth; ++i) { 2563 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); 2564 } 2565 } else { 2566 build_trace_visitor.WalkStack(); 2567 } 2568 2569 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); 2570 if (kIsDebugBuild) { 2571 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); 2572 // Second half of trace_methods is dex PCs. 
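// (Element i holds the ArtMethod* and element GetLength() / 2 + i holds its dex pc; see
// AddFrame above.)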
2573 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) { 2574 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>( 2575 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); 2576 CHECK(method != nullptr); 2577 } 2578 } 2579 return soa.AddLocalReference<jobject>(trace); 2580 } 2581 template jobject Thread::CreateInternalStackTrace<false>( 2582 const ScopedObjectAccessAlreadyRunnable& soa) const; 2583 template jobject Thread::CreateInternalStackTrace<true>( 2584 const ScopedObjectAccessAlreadyRunnable& soa) const; 2585 2586 bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { 2587 // Only count the depth since we do not pass a stack frame array as an argument. 2588 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this)); 2589 count_visitor.WalkStack(); 2590 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth()); 2591 } 2592 2593 jobjectArray Thread::InternalStackTraceToStackTraceElementArray( 2594 const ScopedObjectAccessAlreadyRunnable& soa, 2595 jobject internal, 2596 jobjectArray output_array, 2597 int* stack_depth) { 2598 // Decode the internal stack trace into the depth, method trace and PC trace. 2599 // Subtract one for the methods and PC trace. 2600 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1; 2601 DCHECK_GE(depth, 0); 2602 2603 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); 2604 2605 jobjectArray result; 2606 2607 if (output_array != nullptr) { 2608 // Reuse the array we were given. 2609 result = output_array; 2610 // ...adjusting the number of frames we'll write to not exceed the array length. 2611 const int32_t traces_length = 2612 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength(); 2613 depth = std::min(depth, traces_length); 2614 } else { 2615 // Create java_trace array and place in local reference table 2616 mirror::ObjectArray<mirror::StackTraceElement>* java_traces = 2617 class_linker->AllocStackTraceElementArray(soa.Self(), depth); 2618 if (java_traces == nullptr) { 2619 return nullptr; 2620 } 2621 result = soa.AddLocalReference<jobjectArray>(java_traces); 2622 } 2623 2624 if (stack_depth != nullptr) { 2625 *stack_depth = depth; 2626 } 2627 2628 for (int32_t i = 0; i < depth; ++i) { 2629 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces = 2630 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>(); 2631 // Methods and dex PC trace is element 0. 
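// (A PointerArray is backed by an int array on 32-bit targets and a long array on 64-bit
// targets, hence the two cases in the check below.)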
2632 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); 2633 ObjPtr<mirror::PointerArray> const method_trace = 2634 ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0))); 2635 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 2636 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize); 2637 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( 2638 i + method_trace->GetLength() / 2, kRuntimePointerSize); 2639 int32_t line_number; 2640 StackHandleScope<3> hs(soa.Self()); 2641 auto class_name_object(hs.NewHandle<mirror::String>(nullptr)); 2642 auto source_name_object(hs.NewHandle<mirror::String>(nullptr)); 2643 if (method->IsProxyMethod()) { 2644 line_number = -1; 2645 class_name_object.Assign(method->GetDeclaringClass()->GetName()); 2646 // source_name_object intentionally left null for proxy methods 2647 } else { 2648 line_number = method->GetLineNumFromDexPC(dex_pc); 2649 // Allocate element, potentially triggering GC 2650 // TODO: reuse class_name_object via Class::name_? 2651 const char* descriptor = method->GetDeclaringClassDescriptor(); 2652 CHECK(descriptor != nullptr); 2653 std::string class_name(PrettyDescriptor(descriptor)); 2654 class_name_object.Assign( 2655 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); 2656 if (class_name_object == nullptr) { 2657 soa.Self()->AssertPendingOOMException(); 2658 return nullptr; 2659 } 2660 const char* source_file = method->GetDeclaringClassSourceFile(); 2661 if (line_number == -1) { 2662 // Make the line_number field of StackTraceElement hold the dex pc. 2663 // source_name_object is intentionally left null if we failed to map the dex pc to 2664 // a line number (most probably because there is no debug info). See b/30183883. 2665 line_number = dex_pc; 2666 } else { 2667 if (source_file != nullptr) { 2668 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); 2669 if (source_name_object == nullptr) { 2670 soa.Self()->AssertPendingOOMException(); 2671 return nullptr; 2672 } 2673 } 2674 } 2675 } 2676 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName(); 2677 CHECK(method_name != nullptr); 2678 Handle<mirror::String> method_name_object( 2679 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); 2680 if (method_name_object == nullptr) { 2681 return nullptr; 2682 } 2683 ObjPtr<mirror::StackTraceElement> obj = mirror::StackTraceElement::Alloc(soa.Self(), 2684 class_name_object, 2685 method_name_object, 2686 source_name_object, 2687 line_number); 2688 if (obj == nullptr) { 2689 return nullptr; 2690 } 2691 // We are called from native: use non-transactional mode. 2692 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj); 2693 } 2694 return result; 2695 } 2696 2697 void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) 
{ 2698 va_list args; 2699 va_start(args, fmt); 2700 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 2701 va_end(args); 2702 } 2703 2704 void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, 2705 const char* fmt, va_list ap) { 2706 std::string msg; 2707 StringAppendV(&msg, fmt, ap); 2708 ThrowNewException(exception_class_descriptor, msg.c_str()); 2709 } 2710 2711 void Thread::ThrowNewException(const char* exception_class_descriptor, 2712 const char* msg) { 2713 // Callers should either clear or call ThrowNewWrappedException. 2714 AssertNoPendingExceptionForNewException(msg); 2715 ThrowNewWrappedException(exception_class_descriptor, msg); 2716 } 2717 2718 static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self) 2719 REQUIRES_SHARED(Locks::mutator_lock_) { 2720 ArtMethod* method = self->GetCurrentMethod(nullptr); 2721 return method != nullptr 2722 ? method->GetDeclaringClass()->GetClassLoader() 2723 : nullptr; 2724 } 2725 2726 void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, 2727 const char* msg) { 2728 DCHECK_EQ(this, Thread::Current()); 2729 ScopedObjectAccessUnchecked soa(this); 2730 StackHandleScope<3> hs(soa.Self()); 2731 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self()))); 2732 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException())); 2733 ClearException(); 2734 Runtime* runtime = Runtime::Current(); 2735 auto* cl = runtime->GetClassLinker(); 2736 Handle<mirror::Class> exception_class( 2737 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); 2738 if (UNLIKELY(exception_class == nullptr)) { 2739 CHECK(IsExceptionPending()); 2740 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); 2741 return; 2742 } 2743 2744 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true, 2745 true))) { 2746 DCHECK(IsExceptionPending()); 2747 return; 2748 } 2749 DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); 2750 Handle<mirror::Throwable> exception( 2751 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this)))); 2752 2753 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception. 2754 if (exception == nullptr) { 2755 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2756 return; 2757 } 2758 2759 // Choose an appropriate constructor and set up the arguments. 2760 const char* signature; 2761 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr); 2762 if (msg != nullptr) { 2763 // Ensure we remember this and the method over the String allocation. 2764 msg_string.reset( 2765 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg))); 2766 if (UNLIKELY(msg_string.get() == nullptr)) { 2767 CHECK(IsExceptionPending()); // OOME. 
2768 return; 2769 } 2770 if (cause.get() == nullptr) { 2771 signature = "(Ljava/lang/String;)V"; 2772 } else { 2773 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; 2774 } 2775 } else { 2776 if (cause.get() == nullptr) { 2777 signature = "()V"; 2778 } else { 2779 signature = "(Ljava/lang/Throwable;)V"; 2780 } 2781 } 2782 ArtMethod* exception_init_method = 2783 exception_class->FindConstructor(signature, cl->GetImagePointerSize()); 2784 2785 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in " 2786 << PrettyDescriptor(exception_class_descriptor); 2787 2788 if (UNLIKELY(!runtime->IsStarted())) { 2789 // Something is trying to throw an exception without a started runtime, which is the common 2790 // case in the compiler. We won't be able to invoke the constructor of the exception, so set 2791 // the exception fields directly. 2792 if (msg != nullptr) { 2793 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString()); 2794 } 2795 if (cause.get() != nullptr) { 2796 exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); 2797 } 2798 ScopedLocalRef<jobject> trace(GetJniEnv(), 2799 Runtime::Current()->IsActiveTransaction() 2800 ? CreateInternalStackTrace<true>(soa) 2801 : CreateInternalStackTrace<false>(soa)); 2802 if (trace.get() != nullptr) { 2803 exception->SetStackState(DecodeJObject(trace.get()).Ptr()); 2804 } 2805 SetException(exception.Get()); 2806 } else { 2807 jvalue jv_args[2]; 2808 size_t i = 0; 2809 2810 if (msg != nullptr) { 2811 jv_args[i].l = msg_string.get(); 2812 ++i; 2813 } 2814 if (cause.get() != nullptr) { 2815 jv_args[i].l = cause.get(); 2816 ++i; 2817 } 2818 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get())); 2819 InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args); 2820 if (LIKELY(!IsExceptionPending())) { 2821 SetException(exception.Get()); 2822 } 2823 } 2824 } 2825 2826 void Thread::ThrowOutOfMemoryError(const char* msg) { 2827 LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", 2828 msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : "")); 2829 if (!tls32_.throwing_OutOfMemoryError) { 2830 tls32_.throwing_OutOfMemoryError = true; 2831 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); 2832 tls32_.throwing_OutOfMemoryError = false; 2833 } else { 2834 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. 2835 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2836 } 2837 } 2838 2839 Thread* Thread::CurrentFromGdb() { 2840 return Thread::Current(); 2841 } 2842 2843 void Thread::DumpFromGdb() const { 2844 std::ostringstream ss; 2845 Dump(ss); 2846 std::string str(ss.str()); 2847 // log to stderr for debugging command line processes 2848 std::cerr << str; 2849 #ifdef ART_TARGET_ANDROID 2850 // log to logcat for debugging frameworks processes 2851 LOG(INFO) << str; 2852 #endif 2853 } 2854 2855 // Explicitly instantiate 32 and 64bit thread offset dumping support. 
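// (These are used, e.g., by the disassembler to print thread-relative memory accesses
// symbolically.)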
2856 template 2857 void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset); 2858 template 2859 void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset); 2860 2861 template<PointerSize ptr_size> 2862 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { 2863 #define DO_THREAD_OFFSET(x, y) \ 2864 if (offset == (x).Uint32Value()) { \ 2865 os << (y); \ 2866 return; \ 2867 } 2868 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags") 2869 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table") 2870 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception") 2871 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer"); 2872 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env") 2873 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self") 2874 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end") 2875 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id") 2876 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking") 2877 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method") 2878 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame") 2879 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope") 2880 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger") 2881 #undef DO_THREAD_OFFSET 2882 2883 #define JNI_ENTRY_POINT_INFO(x) \ 2884 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2885 os << #x; \ 2886 return; \ 2887 } 2888 JNI_ENTRY_POINT_INFO(pDlsymLookup) 2889 #undef JNI_ENTRY_POINT_INFO 2890 2891 #define QUICK_ENTRY_POINT_INFO(x) \ 2892 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2893 os << #x; \ 2894 return; \ 2895 } 2896 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) 2897 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8) 2898 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16) 2899 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32) 2900 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64) 2901 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) 2902 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) 2903 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) 2904 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) 2905 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) 2906 QUICK_ENTRY_POINT_INFO(pAllocStringFromString) 2907 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) 2908 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) 2909 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) 2910 QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess) 2911 QUICK_ENTRY_POINT_INFO(pInitializeType) 2912 QUICK_ENTRY_POINT_INFO(pResolveString) 2913 QUICK_ENTRY_POINT_INFO(pSet8Instance) 2914 QUICK_ENTRY_POINT_INFO(pSet8Static) 2915 QUICK_ENTRY_POINT_INFO(pSet16Instance) 2916 QUICK_ENTRY_POINT_INFO(pSet16Static) 2917 QUICK_ENTRY_POINT_INFO(pSet32Instance) 2918 QUICK_ENTRY_POINT_INFO(pSet32Static) 2919 QUICK_ENTRY_POINT_INFO(pSet64Instance) 2920 QUICK_ENTRY_POINT_INFO(pSet64Static) 2921 QUICK_ENTRY_POINT_INFO(pSetObjInstance) 2922 QUICK_ENTRY_POINT_INFO(pSetObjStatic) 2923 QUICK_ENTRY_POINT_INFO(pGetByteInstance) 2924 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) 2925 QUICK_ENTRY_POINT_INFO(pGetByteStatic) 2926 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) 2927 QUICK_ENTRY_POINT_INFO(pGetShortInstance) 2928 QUICK_ENTRY_POINT_INFO(pGetCharInstance) 2929 QUICK_ENTRY_POINT_INFO(pGetShortStatic) 2930 QUICK_ENTRY_POINT_INFO(pGetCharStatic) 2931 QUICK_ENTRY_POINT_INFO(pGet32Instance) 2932 QUICK_ENTRY_POINT_INFO(pGet32Static) 2933 QUICK_ENTRY_POINT_INFO(pGet64Instance) 2934 
QUICK_ENTRY_POINT_INFO(pGet64Static) 2935 QUICK_ENTRY_POINT_INFO(pGetObjInstance) 2936 QUICK_ENTRY_POINT_INFO(pGetObjStatic) 2937 QUICK_ENTRY_POINT_INFO(pAputObject) 2938 QUICK_ENTRY_POINT_INFO(pJniMethodStart) 2939 QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized) 2940 QUICK_ENTRY_POINT_INFO(pJniMethodEnd) 2941 QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized) 2942 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference) 2943 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized) 2944 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline) 2945 QUICK_ENTRY_POINT_INFO(pLockObject) 2946 QUICK_ENTRY_POINT_INFO(pUnlockObject) 2947 QUICK_ENTRY_POINT_INFO(pCmpgDouble) 2948 QUICK_ENTRY_POINT_INFO(pCmpgFloat) 2949 QUICK_ENTRY_POINT_INFO(pCmplDouble) 2950 QUICK_ENTRY_POINT_INFO(pCmplFloat) 2951 QUICK_ENTRY_POINT_INFO(pCos) 2952 QUICK_ENTRY_POINT_INFO(pSin) 2953 QUICK_ENTRY_POINT_INFO(pAcos) 2954 QUICK_ENTRY_POINT_INFO(pAsin) 2955 QUICK_ENTRY_POINT_INFO(pAtan) 2956 QUICK_ENTRY_POINT_INFO(pAtan2) 2957 QUICK_ENTRY_POINT_INFO(pCbrt) 2958 QUICK_ENTRY_POINT_INFO(pCosh) 2959 QUICK_ENTRY_POINT_INFO(pExp) 2960 QUICK_ENTRY_POINT_INFO(pExpm1) 2961 QUICK_ENTRY_POINT_INFO(pHypot) 2962 QUICK_ENTRY_POINT_INFO(pLog) 2963 QUICK_ENTRY_POINT_INFO(pLog10) 2964 QUICK_ENTRY_POINT_INFO(pNextAfter) 2965 QUICK_ENTRY_POINT_INFO(pSinh) 2966 QUICK_ENTRY_POINT_INFO(pTan) 2967 QUICK_ENTRY_POINT_INFO(pTanh) 2968 QUICK_ENTRY_POINT_INFO(pFmod) 2969 QUICK_ENTRY_POINT_INFO(pL2d) 2970 QUICK_ENTRY_POINT_INFO(pFmodf) 2971 QUICK_ENTRY_POINT_INFO(pL2f) 2972 QUICK_ENTRY_POINT_INFO(pD2iz) 2973 QUICK_ENTRY_POINT_INFO(pF2iz) 2974 QUICK_ENTRY_POINT_INFO(pIdivmod) 2975 QUICK_ENTRY_POINT_INFO(pD2l) 2976 QUICK_ENTRY_POINT_INFO(pF2l) 2977 QUICK_ENTRY_POINT_INFO(pLdiv) 2978 QUICK_ENTRY_POINT_INFO(pLmod) 2979 QUICK_ENTRY_POINT_INFO(pLmul) 2980 QUICK_ENTRY_POINT_INFO(pShlLong) 2981 QUICK_ENTRY_POINT_INFO(pShrLong) 2982 QUICK_ENTRY_POINT_INFO(pUshrLong) 2983 QUICK_ENTRY_POINT_INFO(pIndexOf) 2984 QUICK_ENTRY_POINT_INFO(pStringCompareTo) 2985 QUICK_ENTRY_POINT_INFO(pMemcpy) 2986 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline) 2987 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline) 2988 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge) 2989 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck) 2990 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck) 2991 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck) 2992 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck) 2993 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck) 2994 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic) 2995 QUICK_ENTRY_POINT_INFO(pTestSuspend) 2996 QUICK_ENTRY_POINT_INFO(pDeliverException) 2997 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) 2998 QUICK_ENTRY_POINT_INFO(pThrowDivZero) 2999 QUICK_ENTRY_POINT_INFO(pThrowNullPointer) 3000 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) 3001 QUICK_ENTRY_POINT_INFO(pDeoptimize) 3002 QUICK_ENTRY_POINT_INFO(pA64Load) 3003 QUICK_ENTRY_POINT_INFO(pA64Store) 3004 QUICK_ENTRY_POINT_INFO(pNewEmptyString) 3005 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B) 3006 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI) 3007 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII) 3008 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII) 3009 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString) 3010 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString) 3011 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset) 3012 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset) 3013 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C) 3014 
QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII) 3015 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC) 3016 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints) 3017 QUICK_ENTRY_POINT_INFO(pNewStringFromString) 3018 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) 3019 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) 3020 QUICK_ENTRY_POINT_INFO(pReadBarrierJni) 3021 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00) 3022 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01) 3023 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02) 3024 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03) 3025 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04) 3026 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05) 3027 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06) 3028 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07) 3029 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08) 3030 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09) 3031 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10) 3032 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11) 3033 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12) 3034 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13) 3035 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14) 3036 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15) 3037 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16) 3038 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17) 3039 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18) 3040 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19) 3041 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20) 3042 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21) 3043 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22) 3044 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23) 3045 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24) 3046 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25) 3047 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26) 3048 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27) 3049 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28) 3050 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29) 3051 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) 3052 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow) 3053 3054 QUICK_ENTRY_POINT_INFO(pJniMethodFastStart) 3055 QUICK_ENTRY_POINT_INFO(pJniMethodFastEnd) 3056 #undef QUICK_ENTRY_POINT_INFO 3057 3058 os << offset; 3059 } 3060 3061 void Thread::QuickDeliverException() { 3062 // Get exception from thread. 3063 ObjPtr<mirror::Throwable> exception = GetException(); 3064 CHECK(exception != nullptr); 3065 if (exception == GetDeoptimizationException()) { 3066 artDeoptimize(this); 3067 UNREACHABLE(); 3068 } 3069 3070 // This is a real exception: let the instrumentation know about it. 3071 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 3072 if (instrumentation->HasExceptionCaughtListeners() && 3073 IsExceptionThrownByCurrentMethod(exception)) { 3074 // Instrumentation may cause GC so keep the exception object safe. 3075 StackHandleScope<1> hs(this); 3076 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception)); 3077 instrumentation->ExceptionCaughtEvent(this, exception.Ptr()); 3078 } 3079 // Does instrumentation need to deoptimize the stack? 3080 // Note: we do this *after* reporting the exception to instrumentation in case it 3081 // now requires deoptimization. It may happen if a debugger is attached and requests 3082 // new events (single-step, breakpoint, ...) when the exception is reported. 
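  // If the debugger has forced this frame to be executed by the interpreter, the exception is
  // saved into the deoptimization context and the stack is deoptimized right away, provided the
  // caller frame supports asynchronous deoptimization; otherwise a warning is logged and the
  // normal catch-handler search below proceeds.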
3083 if (Dbg::IsForcedInterpreterNeededForException(this)) { 3084 NthCallerVisitor visitor(this, 0, false); 3085 visitor.WalkStack(); 3086 if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) { 3087 // Save the exception into the deoptimization context so it can be restored 3088 // before entering the interpreter. 3089 PushDeoptimizationContext( 3090 JValue(), /*is_reference */ false, /* from_code */ false, exception); 3091 artDeoptimize(this); 3092 UNREACHABLE(); 3093 } else { 3094 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 3095 << visitor.caller->PrettyMethod(); 3096 } 3097 } 3098 3099 // Don't leave exception visible while we try to find the handler, which may cause class 3100 // resolution. 3101 ClearException(); 3102 QuickExceptionHandler exception_handler(this, false); 3103 exception_handler.FindCatch(exception); 3104 exception_handler.UpdateInstrumentationStack(); 3105 exception_handler.DoLongJump(); 3106 } 3107 3108 Context* Thread::GetLongJumpContext() { 3109 Context* result = tlsPtr_.long_jump_context; 3110 if (result == nullptr) { 3111 result = Context::Create(); 3112 } else { 3113 tlsPtr_.long_jump_context = nullptr; // Avoid context being shared. 3114 result->Reset(); 3115 } 3116 return result; 3117 } 3118 3119 // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is 3120 // so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack. 3121 struct CurrentMethodVisitor FINAL : public StackVisitor { 3122 CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error) 3123 REQUIRES_SHARED(Locks::mutator_lock_) 3124 : StackVisitor(thread, 3125 context, 3126 StackVisitor::StackWalkKind::kIncludeInlinedFrames, 3127 check_suspended), 3128 this_object_(nullptr), 3129 method_(nullptr), 3130 dex_pc_(0), 3131 abort_on_error_(abort_on_error) {} 3132 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 3133 ArtMethod* m = GetMethod(); 3134 if (m->IsRuntimeMethod()) { 3135 // Continue if this is a runtime method. 3136 return true; 3137 } 3138 if (context_ != nullptr) { 3139 this_object_ = GetThisObject(); 3140 } 3141 method_ = m; 3142 dex_pc_ = GetDexPc(abort_on_error_); 3143 return false; 3144 } 3145 ObjPtr<mirror::Object> this_object_; 3146 ArtMethod* method_; 3147 uint32_t dex_pc_; 3148 const bool abort_on_error_; 3149 }; 3150 3151 ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, 3152 bool check_suspended, 3153 bool abort_on_error) const { 3154 CurrentMethodVisitor visitor(const_cast<Thread*>(this), 3155 nullptr, 3156 check_suspended, 3157 abort_on_error); 3158 visitor.WalkStack(false); 3159 if (dex_pc != nullptr) { 3160 *dex_pc = visitor.dex_pc_; 3161 } 3162 return visitor.method_; 3163 } 3164 3165 bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const { 3166 return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId(); 3167 } 3168 3169 // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor). 3170 template <typename RootVisitor, bool kPrecise = false> 3171 class ReferenceMapVisitor : public StackVisitor { 3172 public: 3173 ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor) 3174 REQUIRES_SHARED(Locks::mutator_lock_) 3175 // We are visiting the references in compiled frames, so we do not need 3176 // to know the inlined frames. 
3177 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames), 3178 visitor_(visitor) {} 3179 3180 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 3181 if (false) { 3182 LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod()) 3183 << StringPrintf("@ PC:%04x", GetDexPc()); 3184 } 3185 ShadowFrame* shadow_frame = GetCurrentShadowFrame(); 3186 if (shadow_frame != nullptr) { 3187 VisitShadowFrame(shadow_frame); 3188 } else { 3189 VisitQuickFrame(); 3190 } 3191 return true; 3192 } 3193 3194 void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) { 3195 ArtMethod* m = shadow_frame->GetMethod(); 3196 VisitDeclaringClass(m); 3197 DCHECK(m != nullptr); 3198 size_t num_regs = shadow_frame->NumberOfVRegs(); 3199 DCHECK(m->IsNative() || shadow_frame->HasReferenceArray()); 3200 // handle scope for JNI or References for interpreter. 3201 for (size_t reg = 0; reg < num_regs; ++reg) { 3202 mirror::Object* ref = shadow_frame->GetVRegReference(reg); 3203 if (ref != nullptr) { 3204 mirror::Object* new_ref = ref; 3205 visitor_(&new_ref, reg, this); 3206 if (new_ref != ref) { 3207 shadow_frame->SetVRegReference(reg, new_ref); 3208 } 3209 } 3210 } 3211 // Mark lock count map required for structured locking checks. 3212 shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this); 3213 } 3214 3215 private: 3216 // Visiting the declaring class is necessary so that we don't unload the class of a method that 3217 // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since 3218 // the threads do not all hold the heap bitmap lock for parallel GC. 3219 void VisitDeclaringClass(ArtMethod* method) 3220 REQUIRES_SHARED(Locks::mutator_lock_) 3221 NO_THREAD_SAFETY_ANALYSIS { 3222 ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>(); 3223 // klass can be null for runtime methods. 3224 if (klass != nullptr) { 3225 if (kVerifyImageObjectsMarked) { 3226 gc::Heap* const heap = Runtime::Current()->GetHeap(); 3227 gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass, 3228 /*fail_ok*/true); 3229 if (space != nullptr && space->IsImageSpace()) { 3230 bool failed = false; 3231 if (!space->GetLiveBitmap()->Test(klass.Ptr())) { 3232 failed = true; 3233 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space; 3234 } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) { 3235 failed = true; 3236 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space; 3237 } 3238 if (failed) { 3239 GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); 3240 space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT)); 3241 LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method 3242 << " klass@" << klass.Ptr(); 3243 // Pretty info last in case it crashes. 
3244 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " 3245 << klass->PrettyClass(); 3246 } 3247 } 3248 } 3249 mirror::Object* new_ref = klass.Ptr(); 3250 visitor_(&new_ref, -1, this); 3251 if (new_ref != klass) { 3252 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); 3253 } 3254 } 3255 } 3256 3257 template <typename T> 3258 ALWAYS_INLINE 3259 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { 3260 ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); 3261 DCHECK(cur_quick_frame != nullptr); 3262 ArtMethod* m = *cur_quick_frame; 3263 VisitDeclaringClass(m); 3264 3265 // Process register map (which native and runtime methods don't have) 3266 if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { 3267 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); 3268 DCHECK(method_header->IsOptimized()); 3269 auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( 3270 reinterpret_cast<uintptr_t>(cur_quick_frame)); 3271 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); 3272 CodeInfo code_info = method_header->GetOptimizedCodeInfo(); 3273 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 3274 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); 3275 DCHECK(map.IsValid()); 3276 3277 T vreg_info(m, code_info, encoding, map, visitor_); 3278 3279 // Visit stack entries that hold pointers. 3280 const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding); 3281 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map); 3282 for (size_t i = 0; i < number_of_bits; ++i) { 3283 if (stack_mask.LoadBit(i)) { 3284 auto* ref_addr = vreg_base + i; 3285 mirror::Object* ref = ref_addr->AsMirrorPtr(); 3286 if (ref != nullptr) { 3287 mirror::Object* new_ref = ref; 3288 vreg_info.VisitStack(&new_ref, i, this); 3289 if (ref != new_ref) { 3290 ref_addr->Assign(new_ref); 3291 } 3292 } 3293 } 3294 } 3295 // Visit callee-save registers that hold pointers. 
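      // The stack map's register mask records which core registers hold object references at
      // this native pc. For each bit that is set, the location where that register was saved
      // (obtained via GetGPRAddress()) is loaded and, if non-null, reported to the visitor,
      // which may update the slot in place for a moving collector.
      // For example, a register_mask of 0x5 means the saved values of registers 0 and 2 are
      // inspected.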
3296 uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map); 3297 for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) { 3298 if (register_mask & (1 << i)) { 3299 mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i)); 3300 if (kIsDebugBuild && ref_addr == nullptr) { 3301 std::string thread_name; 3302 GetThread()->GetThreadName(thread_name); 3303 LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name; 3304 DescribeStack(GetThread()); 3305 LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) " 3306 << "set in register_mask=" << register_mask << " at " << DescribeLocation(); 3307 } 3308 if (*ref_addr != nullptr) { 3309 vreg_info.VisitRegister(ref_addr, i, this); 3310 } 3311 } 3312 } 3313 } 3314 } 3315 3316 void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 3317 if (kPrecise) { 3318 VisitQuickFramePrecise(); 3319 } else { 3320 VisitQuickFrameNonPrecise(); 3321 } 3322 } 3323 3324 void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) { 3325 struct UndefinedVRegInfo { 3326 UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED, 3327 const CodeInfo& code_info ATTRIBUTE_UNUSED, 3328 const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED, 3329 const StackMap& map ATTRIBUTE_UNUSED, 3330 RootVisitor& _visitor) 3331 : visitor(_visitor) { 3332 } 3333 3334 ALWAYS_INLINE 3335 void VisitStack(mirror::Object** ref, 3336 size_t stack_index ATTRIBUTE_UNUSED, 3337 const StackVisitor* stack_visitor) 3338 REQUIRES_SHARED(Locks::mutator_lock_) { 3339 visitor(ref, -1, stack_visitor); 3340 } 3341 3342 ALWAYS_INLINE 3343 void VisitRegister(mirror::Object** ref, 3344 size_t register_index ATTRIBUTE_UNUSED, 3345 const StackVisitor* stack_visitor) 3346 REQUIRES_SHARED(Locks::mutator_lock_) { 3347 visitor(ref, -1, stack_visitor); 3348 } 3349 3350 RootVisitor& visitor; 3351 }; 3352 VisitQuickFrameWithVregCallback<UndefinedVRegInfo>(); 3353 } 3354 3355 void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) { 3356 struct StackMapVRegInfo { 3357 StackMapVRegInfo(ArtMethod* method, 3358 const CodeInfo& _code_info, 3359 const CodeInfoEncoding& _encoding, 3360 const StackMap& map, 3361 RootVisitor& _visitor) 3362 : number_of_dex_registers(method->GetCodeItem()->registers_size_), 3363 code_info(_code_info), 3364 encoding(_encoding), 3365 dex_register_map(code_info.GetDexRegisterMapOf(map, 3366 encoding, 3367 number_of_dex_registers)), 3368 visitor(_visitor) { 3369 } 3370 3371 // TODO: If necessary, we should consider caching a reverse map instead of the linear 3372 // lookups for each location. 3373 void FindWithType(const size_t index, 3374 const DexRegisterLocation::Kind kind, 3375 mirror::Object** ref, 3376 const StackVisitor* stack_visitor) 3377 REQUIRES_SHARED(Locks::mutator_lock_) { 3378 bool found = false; 3379 for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) { 3380 DexRegisterLocation location = dex_register_map.GetDexRegisterLocation( 3381 dex_reg, number_of_dex_registers, code_info, encoding); 3382 if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) { 3383 visitor(ref, dex_reg, stack_visitor); 3384 found = true; 3385 } 3386 } 3387 3388 if (!found) { 3389 // If nothing found, report with -1. 
3390 visitor(ref, -1, stack_visitor); 3391 } 3392 } 3393 3394 void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor) 3395 REQUIRES_SHARED(Locks::mutator_lock_) { 3396 const size_t stack_offset = stack_index * kFrameSlotSize; 3397 FindWithType(stack_offset, 3398 DexRegisterLocation::Kind::kInStack, 3399 ref, 3400 stack_visitor); 3401 } 3402 3403 void VisitRegister(mirror::Object** ref, 3404 size_t register_index, 3405 const StackVisitor* stack_visitor) 3406 REQUIRES_SHARED(Locks::mutator_lock_) { 3407 FindWithType(register_index, 3408 DexRegisterLocation::Kind::kInRegister, 3409 ref, 3410 stack_visitor); 3411 } 3412 3413 size_t number_of_dex_registers; 3414 const CodeInfo& code_info; 3415 const CodeInfoEncoding& encoding; 3416 DexRegisterMap dex_register_map; 3417 RootVisitor& visitor; 3418 }; 3419 VisitQuickFrameWithVregCallback<StackMapVRegInfo>(); 3420 } 3421 3422 // Visitor for when we visit a root. 3423 RootVisitor& visitor_; 3424 }; 3425 3426 class RootCallbackVisitor { 3427 public: 3428 RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {} 3429 3430 void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const 3431 REQUIRES_SHARED(Locks::mutator_lock_) { 3432 visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg)); 3433 } 3434 3435 private: 3436 RootVisitor* const visitor_; 3437 const uint32_t tid_; 3438 }; 3439 3440 template <bool kPrecise> 3441 void Thread::VisitRoots(RootVisitor* visitor) { 3442 const uint32_t thread_id = GetThreadId(); 3443 visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id)); 3444 if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) { 3445 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), 3446 RootInfo(kRootNativeStack, thread_id)); 3447 } 3448 visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id)); 3449 tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id)); 3450 tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id)); 3451 HandleScopeVisitRoots(visitor, thread_id); 3452 if (tlsPtr_.debug_invoke_req != nullptr) { 3453 tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id)); 3454 } 3455 // Visit roots for deoptimization. 
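  // Shadow frames stashed for pending deoptimizations, the return values and pending exceptions
  // kept in the deoptimization context records, and the shadow frames registered in the
  // frame-id map are not reachable through a normal stack walk, so they are visited explicitly
  // below.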
3456 if (tlsPtr_.stacked_shadow_frame_record != nullptr) { 3457 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 3458 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback); 3459 for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record; 3460 record != nullptr; 3461 record = record->GetLink()) { 3462 for (ShadowFrame* shadow_frame = record->GetShadowFrame(); 3463 shadow_frame != nullptr; 3464 shadow_frame = shadow_frame->GetLink()) { 3465 mapper.VisitShadowFrame(shadow_frame); 3466 } 3467 } 3468 } 3469 for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; 3470 record != nullptr; 3471 record = record->GetLink()) { 3472 if (record->IsReference()) { 3473 visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(), 3474 RootInfo(kRootThreadObject, thread_id)); 3475 } 3476 visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(), 3477 RootInfo(kRootThreadObject, thread_id)); 3478 } 3479 if (tlsPtr_.frame_id_to_shadow_frame != nullptr) { 3480 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 3481 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback); 3482 for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame; 3483 record != nullptr; 3484 record = record->GetNext()) { 3485 mapper.VisitShadowFrame(record->GetShadowFrame()); 3486 } 3487 } 3488 for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) { 3489 verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id)); 3490 } 3491 // Visit roots on this thread's stack 3492 RuntimeContextType context; 3493 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 3494 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback); 3495 mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false); 3496 for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) { 3497 visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id)); 3498 } 3499 } 3500 3501 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { 3502 if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) { 3503 VisitRoots<true>(visitor); 3504 } else { 3505 VisitRoots<false>(visitor); 3506 } 3507 } 3508 3509 class VerifyRootVisitor : public SingleRootVisitor { 3510 public: 3511 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) 3512 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 3513 VerifyObject(root); 3514 } 3515 }; 3516 3517 void Thread::VerifyStackImpl() { 3518 if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) { 3519 VerifyRootVisitor visitor; 3520 std::unique_ptr<Context> context(Context::Create()); 3521 RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId()); 3522 ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback); 3523 mapper.WalkStack(); 3524 } 3525 } 3526 3527 // Set the stack end to that to be used during a stack overflow 3528 void Thread::SetStackEndForStackOverflow() { 3529 // During stack overflow we allow use of the full stack. 3530 if (tlsPtr_.stack_end == tlsPtr_.stack_begin) { 3531 // However, we seem to have already extended to use the full stack. 
3532 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently " 3533 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?"; 3534 DumpStack(LOG_STREAM(ERROR)); 3535 LOG(FATAL) << "Recursive stack overflow."; 3536 } 3537 3538 tlsPtr_.stack_end = tlsPtr_.stack_begin; 3539 3540 // Remove the stack overflow protection if it is set up. 3541 bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks(); 3542 if (implicit_stack_check) { 3543 if (!UnprotectStack()) { 3544 LOG(ERROR) << "Unable to remove stack protection for stack overflow"; 3545 } 3546 } 3547 } 3548 3549 void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) { 3550 DCHECK_LE(start, end); 3551 DCHECK_LE(end, limit); 3552 tlsPtr_.thread_local_start = start; 3553 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start; 3554 tlsPtr_.thread_local_end = end; 3555 tlsPtr_.thread_local_limit = limit; 3556 tlsPtr_.thread_local_objects = 0; 3557 } 3558 3559 bool Thread::HasTlab() const { 3560 bool has_tlab = tlsPtr_.thread_local_pos != nullptr; 3561 if (has_tlab) { 3562 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr); 3563 } else { 3564 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr); 3565 } 3566 return has_tlab; 3567 } 3568 3569 std::ostream& operator<<(std::ostream& os, const Thread& thread) { 3570 thread.ShortDump(os); 3571 return os; 3572 } 3573 3574 bool Thread::ProtectStack(bool fatal_on_error) { 3575 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3576 VLOG(threads) << "Protecting stack at " << pregion; 3577 if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { 3578 if (fatal_on_error) { 3579 LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. 
" 3580 "Reason: " 3581 << strerror(errno) << " size: " << kStackOverflowProtectedSize; 3582 } 3583 return false; 3584 } 3585 return true; 3586 } 3587 3588 bool Thread::UnprotectStack() { 3589 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3590 VLOG(threads) << "Unprotecting stack at " << pregion; 3591 return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0; 3592 } 3593 3594 void Thread::ActivateSingleStepControl(SingleStepControl* ssc) { 3595 CHECK(Dbg::IsDebuggerActive()); 3596 CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this; 3597 CHECK(ssc != nullptr); 3598 tlsPtr_.single_step_control = ssc; 3599 } 3600 3601 void Thread::DeactivateSingleStepControl() { 3602 CHECK(Dbg::IsDebuggerActive()); 3603 CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this; 3604 SingleStepControl* ssc = GetSingleStepControl(); 3605 tlsPtr_.single_step_control = nullptr; 3606 delete ssc; 3607 } 3608 3609 void Thread::SetDebugInvokeReq(DebugInvokeReq* req) { 3610 CHECK(Dbg::IsDebuggerActive()); 3611 CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this; 3612 CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself"; 3613 CHECK(req != nullptr); 3614 tlsPtr_.debug_invoke_req = req; 3615 } 3616 3617 void Thread::ClearDebugInvokeReq() { 3618 CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this; 3619 CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself"; 3620 DebugInvokeReq* req = tlsPtr_.debug_invoke_req; 3621 tlsPtr_.debug_invoke_req = nullptr; 3622 delete req; 3623 } 3624 3625 void Thread::PushVerifier(verifier::MethodVerifier* verifier) { 3626 verifier->link_ = tlsPtr_.method_verifier; 3627 tlsPtr_.method_verifier = verifier; 3628 } 3629 3630 void Thread::PopVerifier(verifier::MethodVerifier* verifier) { 3631 CHECK_EQ(tlsPtr_.method_verifier, verifier); 3632 tlsPtr_.method_verifier = verifier->link_; 3633 } 3634 3635 size_t Thread::NumberOfHeldMutexes() const { 3636 size_t count = 0; 3637 for (BaseMutex* mu : tlsPtr_.held_mutexes) { 3638 count += mu != nullptr ? 1 : 0; 3639 } 3640 return count; 3641 } 3642 3643 void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { 3644 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); 3645 ClearException(); 3646 ShadowFrame* shadow_frame = 3647 PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame); 3648 ObjPtr<mirror::Throwable> pending_exception; 3649 bool from_code = false; 3650 PopDeoptimizationContext(result, &pending_exception, &from_code); 3651 SetTopOfStack(nullptr); 3652 SetTopOfShadowStack(shadow_frame); 3653 3654 // Restore the exception that was pending before deoptimization then interpret the 3655 // deoptimized frames. 
3656 if (pending_exception != nullptr) { 3657 SetException(pending_exception); 3658 } 3659 interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result); 3660 } 3661 3662 void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) { 3663 CHECK(new_exception != nullptr); 3664 // TODO: DCHECK(!IsExceptionPending()); 3665 tlsPtr_.exception = new_exception.Ptr(); 3666 } 3667 3668 bool Thread::IsAotCompiler() { 3669 return Runtime::Current()->IsAotCompiler(); 3670 } 3671 3672 mirror::Object* Thread::GetPeerFromOtherThread() const { 3673 DCHECK(tlsPtr_.jpeer == nullptr); 3674 mirror::Object* peer = tlsPtr_.opeer; 3675 if (kUseReadBarrier && Current()->GetIsGcMarking()) { 3676 // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack 3677 // may not have been flipped yet and peer may be a from-space (stale) ref. So explicitly 3678 // mark/forward it here. 3679 peer = art::ReadBarrier::Mark(peer); 3680 } 3681 return peer; 3682 } 3683 3684 void Thread::SetReadBarrierEntrypoints() { 3685 // Make sure entrypoints aren't null. 3686 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ true); 3687 } 3688 3689 } // namespace art 3690