/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <limits.h>  // for INT_MAX
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#if __has_feature(hwaddress_sanitizer)
#include <sanitizer/hwasan_interface.h>
#else
#define __hwasan_tag_pointer(p, t) (p)
#endif

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/strings.h"

#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/atomic.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "base/utils.h"
#include "class_linker-inl.h"
#include "class_root.h"
#include "debugger.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
#include "interpreter/mterp/mterp.h"
#include "interpreter/shadow_frame-inl.h"
#include "java_frame_root_info.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "monitor_objects_stack_visitor.h"
#include "native_stack_dump.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_utf_chars.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "palette/palette.h"
#include "quick/quick_method_frame_info.h"
#include "quick_exception_handler.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime-inl.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ = nullptr;

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(kUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
  ResetQuickAllocEntryPointsForThread(is_marking);
}

void Thread::InitTlsEntryPoints() {
  ScopedTrace trace("InitTlsEntryPoints");
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}

void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
  if (kUseReadBarrier && kRuntimeISA != InstructionSet::kX86_64) {
    // Allocation entrypoint switching is currently only implemented for X86_64.
177 is_marking = true; 178 } 179 ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking); 180 } 181 182 class DeoptimizationContextRecord { 183 public: 184 DeoptimizationContextRecord(const JValue& ret_val, 185 bool is_reference, 186 bool from_code, 187 ObjPtr<mirror::Throwable> pending_exception, 188 DeoptimizationMethodType method_type, 189 DeoptimizationContextRecord* link) 190 : ret_val_(ret_val), 191 is_reference_(is_reference), 192 from_code_(from_code), 193 pending_exception_(pending_exception.Ptr()), 194 deopt_method_type_(method_type), 195 link_(link) {} 196 197 JValue GetReturnValue() const { return ret_val_; } 198 bool IsReference() const { return is_reference_; } 199 bool GetFromCode() const { return from_code_; } 200 ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; } 201 DeoptimizationContextRecord* GetLink() const { return link_; } 202 mirror::Object** GetReturnValueAsGCRoot() { 203 DCHECK(is_reference_); 204 return ret_val_.GetGCRoot(); 205 } 206 mirror::Object** GetPendingExceptionAsGCRoot() { 207 return reinterpret_cast<mirror::Object**>(&pending_exception_); 208 } 209 DeoptimizationMethodType GetDeoptimizationMethodType() const { 210 return deopt_method_type_; 211 } 212 213 private: 214 // The value returned by the method at the top of the stack before deoptimization. 215 JValue ret_val_; 216 217 // Indicates whether the returned value is a reference. If so, the GC will visit it. 218 const bool is_reference_; 219 220 // Whether the context was created from an explicit deoptimization in the code. 221 const bool from_code_; 222 223 // The exception that was pending before deoptimization (or null if there was no pending 224 // exception). 225 mirror::Throwable* pending_exception_; 226 227 // Whether the context was created for an (idempotent) runtime method. 228 const DeoptimizationMethodType deopt_method_type_; 229 230 // A link to the previous DeoptimizationContextRecord. 
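  // Records therefore form a LIFO singly-linked list whose head is
  // tlsPtr_.deoptimization_context_stack (see PushDeoptimizationContext and
  // PopDeoptimizationContext).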
231 DeoptimizationContextRecord* const link_; 232 233 DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord); 234 }; 235 236 class StackedShadowFrameRecord { 237 public: 238 StackedShadowFrameRecord(ShadowFrame* shadow_frame, 239 StackedShadowFrameType type, 240 StackedShadowFrameRecord* link) 241 : shadow_frame_(shadow_frame), 242 type_(type), 243 link_(link) {} 244 245 ShadowFrame* GetShadowFrame() const { return shadow_frame_; } 246 StackedShadowFrameType GetType() const { return type_; } 247 StackedShadowFrameRecord* GetLink() const { return link_; } 248 249 private: 250 ShadowFrame* const shadow_frame_; 251 const StackedShadowFrameType type_; 252 StackedShadowFrameRecord* const link_; 253 254 DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord); 255 }; 256 257 void Thread::PushDeoptimizationContext(const JValue& return_value, 258 bool is_reference, 259 ObjPtr<mirror::Throwable> exception, 260 bool from_code, 261 DeoptimizationMethodType method_type) { 262 DeoptimizationContextRecord* record = new DeoptimizationContextRecord( 263 return_value, 264 is_reference, 265 from_code, 266 exception, 267 method_type, 268 tlsPtr_.deoptimization_context_stack); 269 tlsPtr_.deoptimization_context_stack = record; 270 } 271 272 void Thread::PopDeoptimizationContext(JValue* result, 273 ObjPtr<mirror::Throwable>* exception, 274 bool* from_code, 275 DeoptimizationMethodType* method_type) { 276 AssertHasDeoptimizationContext(); 277 DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; 278 tlsPtr_.deoptimization_context_stack = record->GetLink(); 279 result->SetJ(record->GetReturnValue().GetJ()); 280 *exception = record->GetPendingException(); 281 *from_code = record->GetFromCode(); 282 *method_type = record->GetDeoptimizationMethodType(); 283 delete record; 284 } 285 286 void Thread::AssertHasDeoptimizationContext() { 287 CHECK(tlsPtr_.deoptimization_context_stack != nullptr) 288 << "No deoptimization context for thread " << *this; 289 } 290 291 enum { 292 kPermitAvailable = 0, // Incrementing consumes the permit 293 kNoPermit = 1, // Incrementing marks as waiter waiting 294 kNoPermitWaiterWaiting = 2 295 }; 296 297 void Thread::Park(bool is_absolute, int64_t time) { 298 DCHECK(this == Thread::Current()); 299 #if ART_USE_FUTEXES 300 // Consume the permit, or mark as waiting. This cannot cause park_state to go 301 // outside of its valid range (0, 1, 2), because in all cases where 2 is 302 // assigned it is set back to 1 before returning, and this method cannot run 303 // concurrently with itself since it operates on the current thread. 304 int old_state = tls32_.park_state_.fetch_add(1, std::memory_order_relaxed); 305 if (old_state == kNoPermit) { 306 // no permit was available. block thread until later. 307 Runtime::Current()->GetRuntimeCallbacks()->ThreadParkStart(is_absolute, time); 308 bool timed_out = false; 309 if (!is_absolute && time == 0) { 310 // Thread.getState() is documented to return waiting for untimed parks. 311 ScopedThreadSuspension sts(this, ThreadState::kWaiting); 312 DCHECK_EQ(NumberOfHeldMutexes(), 0u); 313 int result = futex(tls32_.park_state_.Address(), 314 FUTEX_WAIT_PRIVATE, 315 /* sleep if val = */ kNoPermitWaiterWaiting, 316 /* timeout */ nullptr, 317 nullptr, 318 0); 319 // This errno check must happen before the scope is closed, to ensure that 320 // no destructors (such as ScopedThreadSuspension) overwrite errno. 
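      // EAGAIN means the futex word no longer held kNoPermitWaiterWaiting when
      // the kernel examined it, i.e. Unpark() delivered the permit before we
      // actually went to sleep; EINTR is a signal-induced spurious wakeup.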
      if (result == -1) {
        switch (errno) {
          case EAGAIN:
            FALLTHROUGH_INTENDED;
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    } else if (time > 0) {
      // Only actually suspend and futex_wait if we're going to wait for some
      // positive amount of time - the kernel will reject negative times with
      // EINVAL, and a zero time will just noop.

      // Thread.getState() is documented to return timed wait for timed parks.
      ScopedThreadSuspension sts(this, ThreadState::kTimedWaiting);
      DCHECK_EQ(NumberOfHeldMutexes(), 0u);
      timespec timespec;
      int result = 0;
      if (is_absolute) {
        // Time is millis when scheduled for an absolute time
        timespec.tv_nsec = (time % 1000) * 1000000;
        timespec.tv_sec = time / 1000;
        // This odd looking pattern is recommended by futex documentation to
        // wait until an absolute deadline, with otherwise identical behavior to
        // FUTEX_WAIT_PRIVATE. This also allows parkUntil() to return at the
        // correct time when the system clock changes.
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_BITSET_PRIVATE | FUTEX_CLOCK_REALTIME,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       FUTEX_BITSET_MATCH_ANY);
      } else {
        // Time is nanos when scheduled for a relative time
        timespec.tv_sec = time / 1000000000;
        timespec.tv_nsec = time % 1000000000;
        result = futex(tls32_.park_state_.Address(),
                       FUTEX_WAIT_PRIVATE,
                       /* sleep if val = */ kNoPermitWaiterWaiting,
                       &timespec,
                       nullptr,
                       0);
      }
      // This errno check must happen before the scope is closed, to ensure that
      // no destructors (such as ScopedThreadSuspension) overwrite errno.
      if (result == -1) {
        switch (errno) {
          case ETIMEDOUT:
            timed_out = true;
            FALLTHROUGH_INTENDED;
          case EAGAIN:
          case EINTR: break;  // park() is allowed to spuriously return
          default: PLOG(FATAL) << "Failed to park";
        }
      }
    }
    // Mark as no longer waiting, and consume permit if there is one.
    tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed);
    // TODO: Call to signal jvmti here
    Runtime::Current()->GetRuntimeCallbacks()->ThreadParkFinished(timed_out);
  } else {
    // the fetch_add has consumed the permit. immediately return.
    DCHECK_EQ(old_state, kPermitAvailable);
  }
#else
  #pragma clang diagnostic push
  #pragma clang diagnostic warning "-W#warnings"
  #warning "LockSupport.park/unpark implemented as noops without FUTEX support."
389 #pragma clang diagnostic pop 390 UNUSED(is_absolute, time); 391 UNIMPLEMENTED(WARNING); 392 sched_yield(); 393 #endif 394 } 395 396 void Thread::Unpark() { 397 #if ART_USE_FUTEXES 398 // Set permit available; will be consumed either by fetch_add (when the thread 399 // tries to park) or store (when the parked thread is woken up) 400 if (tls32_.park_state_.exchange(kPermitAvailable, std::memory_order_relaxed) 401 == kNoPermitWaiterWaiting) { 402 int result = futex(tls32_.park_state_.Address(), 403 FUTEX_WAKE_PRIVATE, 404 /* number of waiters = */ 1, 405 nullptr, 406 nullptr, 407 0); 408 if (result == -1) { 409 PLOG(FATAL) << "Failed to unpark"; 410 } 411 } 412 #else 413 UNIMPLEMENTED(WARNING); 414 #endif 415 } 416 417 void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) { 418 StackedShadowFrameRecord* record = new StackedShadowFrameRecord( 419 sf, type, tlsPtr_.stacked_shadow_frame_record); 420 tlsPtr_.stacked_shadow_frame_record = record; 421 } 422 423 ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) { 424 StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record; 425 if (must_be_present) { 426 DCHECK(record != nullptr); 427 } else { 428 if (record == nullptr || record->GetType() != type) { 429 return nullptr; 430 } 431 } 432 tlsPtr_.stacked_shadow_frame_record = record->GetLink(); 433 ShadowFrame* shadow_frame = record->GetShadowFrame(); 434 delete record; 435 return shadow_frame; 436 } 437 438 class FrameIdToShadowFrame { 439 public: 440 static FrameIdToShadowFrame* Create(size_t frame_id, 441 ShadowFrame* shadow_frame, 442 FrameIdToShadowFrame* next, 443 size_t num_vregs) { 444 // Append a bool array at the end to keep track of what vregs are updated by the debugger. 445 uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs]; 446 return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next); 447 } 448 449 static void Delete(FrameIdToShadowFrame* f) { 450 uint8_t* memory = reinterpret_cast<uint8_t*>(f); 451 delete[] memory; 452 } 453 454 size_t GetFrameId() const { return frame_id_; } 455 ShadowFrame* GetShadowFrame() const { return shadow_frame_; } 456 FrameIdToShadowFrame* GetNext() const { return next_; } 457 void SetNext(FrameIdToShadowFrame* next) { next_ = next; } 458 bool* GetUpdatedVRegFlags() { 459 return updated_vreg_flags_; 460 } 461 462 private: 463 FrameIdToShadowFrame(size_t frame_id, 464 ShadowFrame* shadow_frame, 465 FrameIdToShadowFrame* next) 466 : frame_id_(frame_id), 467 shadow_frame_(shadow_frame), 468 next_(next) {} 469 470 const size_t frame_id_; 471 ShadowFrame* const shadow_frame_; 472 FrameIdToShadowFrame* next_; 473 bool updated_vreg_flags_[0]; 474 475 DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame); 476 }; 477 478 static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head, 479 size_t frame_id) { 480 FrameIdToShadowFrame* found = nullptr; 481 for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) { 482 if (record->GetFrameId() == frame_id) { 483 if (kIsDebugBuild) { 484 // Sanity check we have at most one record for this frame. 
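        // (Debug builds keep scanning the remaining records to verify
        // uniqueness instead of returning at the first match.)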
485 CHECK(found == nullptr) << "Multiple records for the frame " << frame_id; 486 found = record; 487 } else { 488 return record; 489 } 490 } 491 } 492 return found; 493 } 494 495 ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) { 496 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame( 497 tlsPtr_.frame_id_to_shadow_frame, frame_id); 498 if (record != nullptr) { 499 return record->GetShadowFrame(); 500 } 501 return nullptr; 502 } 503 504 // Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr. 505 bool* Thread::GetUpdatedVRegFlags(size_t frame_id) { 506 FrameIdToShadowFrame* record = FindFrameIdToShadowFrame( 507 tlsPtr_.frame_id_to_shadow_frame, frame_id); 508 CHECK(record != nullptr); 509 return record->GetUpdatedVRegFlags(); 510 } 511 512 ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id, 513 uint32_t num_vregs, 514 ArtMethod* method, 515 uint32_t dex_pc) { 516 ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id); 517 if (shadow_frame != nullptr) { 518 return shadow_frame; 519 } 520 VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method); 521 shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc); 522 FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id, 523 shadow_frame, 524 tlsPtr_.frame_id_to_shadow_frame, 525 num_vregs); 526 for (uint32_t i = 0; i < num_vregs; i++) { 527 // Do this to clear all references for root visitors. 528 shadow_frame->SetVRegReference(i, nullptr); 529 // This flag will be changed to true if the debugger modifies the value. 530 record->GetUpdatedVRegFlags()[i] = false; 531 } 532 tlsPtr_.frame_id_to_shadow_frame = record; 533 return shadow_frame; 534 } 535 536 TLSData* Thread::GetCustomTLS(const char* key) { 537 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_); 538 auto it = custom_tls_.find(key); 539 return (it != custom_tls_.end()) ? it->second.get() : nullptr; 540 } 541 542 void Thread::SetCustomTLS(const char* key, TLSData* data) { 543 // We will swap the old data (which might be nullptr) with this and then delete it outside of the 544 // custom_tls_lock_. 545 std::unique_ptr<TLSData> old_data(data); 546 { 547 MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_); 548 custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data); 549 } 550 } 551 552 void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) { 553 FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame; 554 if (head->GetFrameId() == frame_id) { 555 tlsPtr_.frame_id_to_shadow_frame = head->GetNext(); 556 FrameIdToShadowFrame::Delete(head); 557 return; 558 } 559 FrameIdToShadowFrame* prev = head; 560 for (FrameIdToShadowFrame* record = head->GetNext(); 561 record != nullptr; 562 prev = record, record = record->GetNext()) { 563 if (record->GetFrameId() == frame_id) { 564 prev->SetNext(record->GetNext()); 565 FrameIdToShadowFrame::Delete(record); 566 return; 567 } 568 } 569 LOG(FATAL) << "No shadow frame for frame " << frame_id; 570 UNREACHABLE(); 571 } 572 573 void Thread::InitTid() { 574 tls32_.tid = ::art::GetTid(); 575 } 576 577 void Thread::InitAfterFork() { 578 // One thread (us) survived the fork, but we have a new tid so we need to 579 // update the value stashed in this Thread*. 
580 InitTid(); 581 } 582 583 void* Thread::CreateCallback(void* arg) { 584 Thread* self = reinterpret_cast<Thread*>(arg); 585 Runtime* runtime = Runtime::Current(); 586 if (runtime == nullptr) { 587 LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self; 588 return nullptr; 589 } 590 { 591 // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true 592 // after self->Init(). 593 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); 594 // Check that if we got here we cannot be shutting down (as shutdown should never have started 595 // while threads are being born). 596 CHECK(!runtime->IsShuttingDownLocked()); 597 // Note: given that the JNIEnv is created in the parent thread, the only failure point here is 598 // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort 599 // the runtime in such a case. In case this ever changes, we need to make sure here to 600 // delete the tmp_jni_env, as we own it at this point. 601 CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env)); 602 self->tlsPtr_.tmp_jni_env = nullptr; 603 Runtime::Current()->EndThreadBirth(); 604 } 605 { 606 ScopedObjectAccess soa(self); 607 self->InitStringEntryPoints(); 608 609 // Copy peer into self, deleting global reference when done. 610 CHECK(self->tlsPtr_.jpeer != nullptr); 611 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr(); 612 self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer); 613 self->tlsPtr_.jpeer = nullptr; 614 self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str()); 615 616 ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority); 617 self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer)); 618 619 runtime->GetRuntimeCallbacks()->ThreadStart(self); 620 621 // Unpark ourselves if the java peer was unparked before it started (see 622 // b/28845097#comment49 for more information) 623 624 ArtField* unparkedField = jni::DecodeArtField( 625 WellKnownClasses::java_lang_Thread_unparkedBeforeStart); 626 bool should_unpark = false; 627 { 628 // Hold the lock here, so that if another thread calls unpark before the thread starts 629 // we don't observe the unparkedBeforeStart field before the unparker writes to it, 630 // which could cause a lost unpark. 631 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_); 632 should_unpark = unparkedField->GetBoolean(self->tlsPtr_.opeer) == JNI_TRUE; 633 } 634 if (should_unpark) { 635 self->Unpark(); 636 } 637 // Invoke the 'run' method of our java.lang.Thread. 638 ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer; 639 jmethodID mid = WellKnownClasses::java_lang_Thread_run; 640 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); 641 InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr); 642 } 643 // Detach and delete self. 644 Runtime::Current()->GetThreadList()->Unregister(self); 645 646 return nullptr; 647 } 648 649 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, 650 ObjPtr<mirror::Object> thread_peer) { 651 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer); 652 Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer)); 653 // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ 654 // to stop it from going away. 
655 if (kIsDebugBuild) { 656 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 657 if (result != nullptr && !result->IsSuspended()) { 658 Locks::thread_list_lock_->AssertHeld(soa.Self()); 659 } 660 } 661 return result; 662 } 663 664 Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, 665 jobject java_thread) { 666 return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread)); 667 } 668 669 static size_t FixStackSize(size_t stack_size) { 670 // A stack size of zero means "use the default". 671 if (stack_size == 0) { 672 stack_size = Runtime::Current()->GetDefaultStackSize(); 673 } 674 675 // Dalvik used the bionic pthread default stack size for native threads, 676 // so include that here to support apps that expect large native stacks. 677 stack_size += 1 * MB; 678 679 // Under sanitization, frames of the interpreter may become bigger, both for C code as 680 // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization 681 // of all core classes cannot be done in all test circumstances. 682 if (kMemoryToolIsAvailable) { 683 stack_size = std::max(2 * MB, stack_size); 684 } 685 686 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN. 687 if (stack_size < PTHREAD_STACK_MIN) { 688 stack_size = PTHREAD_STACK_MIN; 689 } 690 691 if (Runtime::Current()->ExplicitStackOverflowChecks()) { 692 // It's likely that callers are trying to ensure they have at least a certain amount of 693 // stack space, so we should add our reserved space on top of what they requested, rather 694 // than implicitly take it away from them. 695 stack_size += GetStackOverflowReservedBytes(kRuntimeISA); 696 } else { 697 // If we are going to use implicit stack checks, allocate space for the protected 698 // region at the bottom of the stack. 699 stack_size += Thread::kStackOverflowImplicitCheckSize + 700 GetStackOverflowReservedBytes(kRuntimeISA); 701 } 702 703 // Some systems require the stack size to be a multiple of the system page size, so round up. 704 stack_size = RoundUp(stack_size, kPageSize); 705 706 return stack_size; 707 } 708 709 // Return the nearest page-aligned address below the current stack top. 710 NO_INLINE 711 static uint8_t* FindStackTop() { 712 return reinterpret_cast<uint8_t*>( 713 AlignDown(__builtin_frame_address(0), kPageSize)); 714 } 715 716 // Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack 717 // overflow is detected. It is located right below the stack_begin_. 718 ATTRIBUTE_NO_SANITIZE_ADDRESS 719 void Thread::InstallImplicitProtection() { 720 uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 721 // Page containing current top of stack. 722 uint8_t* stack_top = FindStackTop(); 723 724 // Try to directly protect the stack. 725 VLOG(threads) << "installing stack protected region at " << std::hex << 726 static_cast<void*>(pregion) << " to " << 727 static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); 728 if (ProtectStack(/* fatal_on_error= */ false)) { 729 // Tell the kernel that we won't be needing these pages any more. 730 // NB. madvise will probably write zeroes into the memory (on linux it does). 731 uint32_t unwanted_size = stack_top - pregion - kPageSize; 732 madvise(pregion, unwanted_size, MADV_DONTNEED); 733 return; 734 } 735 736 // There is a little complexity here that deserves a special mention. 
On some 737 // architectures, the stack is created using a VM_GROWSDOWN flag 738 // to prevent memory being allocated when it's not needed. This flag makes the 739 // kernel only allocate memory for the stack by growing down in memory. Because we 740 // want to put an mprotected region far away from that at the stack top, we need 741 // to make sure the pages for the stack are mapped in before we call mprotect. 742 // 743 // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN 744 // with a non-mapped stack (usually only the main thread). 745 // 746 // We map in the stack by reading every page from the stack bottom (highest address) 747 // to the stack top. (We then madvise this away.) This must be done by reading from the 748 // current stack pointer downwards. 749 // 750 // Accesses too far below the current machine register corresponding to the stack pointer (e.g., 751 // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We 752 // thus have to move the stack pointer. We do this portably by using a recursive function with a 753 // large stack frame size. 754 755 // (Defensively) first remove the protection on the protected region as we'll want to read 756 // and write it. Ignore errors. 757 UnprotectStack(); 758 759 VLOG(threads) << "Need to map in stack for thread at " << std::hex << 760 static_cast<void*>(pregion); 761 762 struct RecurseDownStack { 763 // This function has an intentionally large stack size. 764 #pragma GCC diagnostic push 765 #pragma GCC diagnostic ignored "-Wframe-larger-than=" 766 NO_INLINE 767 static void Touch(uintptr_t target) { 768 volatile size_t zero = 0; 769 // Use a large local volatile array to ensure a large frame size. Do not use anything close 770 // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but 771 // there is no pragma support for this. 772 // Note: for ASAN we need to shrink the array a bit, as there's other overhead. 773 constexpr size_t kAsanMultiplier = 774 #ifdef ADDRESS_SANITIZER 775 2u; 776 #else 777 1u; 778 #endif 779 volatile char space[kPageSize - (kAsanMultiplier * 256)]; 780 char sink ATTRIBUTE_UNUSED = space[zero]; // NOLINT 781 // Remove tag from the pointer. Nop in non-hwasan builds. 782 uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0)); 783 if (addr >= target + kPageSize) { 784 Touch(target); 785 } 786 zero *= 2; // Try to avoid tail recursion. 787 } 788 #pragma GCC diagnostic pop 789 }; 790 RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion)); 791 792 VLOG(threads) << "(again) installing stack protected region at " << std::hex << 793 static_cast<void*>(pregion) << " to " << 794 static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); 795 796 // Protect the bottom of the stack to prevent read/write to it. 797 ProtectStack(/* fatal_on_error= */ true); 798 799 // Tell the kernel that we won't be needing these pages any more. 800 // NB. madvise will probably write zeroes into the memory (on linux it does). 
801 uint32_t unwanted_size = stack_top - pregion - kPageSize; 802 madvise(pregion, unwanted_size, MADV_DONTNEED); 803 } 804 805 void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) { 806 CHECK(java_peer != nullptr); 807 Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf(); 808 809 if (VLOG_IS_ON(threads)) { 810 ScopedObjectAccess soa(env); 811 812 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); 813 ObjPtr<mirror::String> java_name = 814 f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString(); 815 std::string thread_name; 816 if (java_name != nullptr) { 817 thread_name = java_name->ToModifiedUtf8(); 818 } else { 819 thread_name = "(Unnamed)"; 820 } 821 822 VLOG(threads) << "Creating native thread for " << thread_name; 823 self->Dump(LOG_STREAM(INFO)); 824 } 825 826 Runtime* runtime = Runtime::Current(); 827 828 // Atomically start the birth of the thread ensuring the runtime isn't shutting down. 829 bool thread_start_during_shutdown = false; 830 { 831 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 832 if (runtime->IsShuttingDownLocked()) { 833 thread_start_during_shutdown = true; 834 } else { 835 runtime->StartThreadBirth(); 836 } 837 } 838 if (thread_start_during_shutdown) { 839 ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError")); 840 env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown"); 841 return; 842 } 843 844 Thread* child_thread = new Thread(is_daemon); 845 // Use global JNI ref to hold peer live while child thread starts. 846 child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer); 847 stack_size = FixStackSize(stack_size); 848 849 // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing 850 // to assign it. 851 env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 852 reinterpret_cast<jlong>(child_thread)); 853 854 // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and 855 // do not have a good way to report this on the child's side. 856 std::string error_msg; 857 std::unique_ptr<JNIEnvExt> child_jni_env_ext( 858 JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg)); 859 860 int pthread_create_result = 0; 861 if (child_jni_env_ext.get() != nullptr) { 862 pthread_t new_pthread; 863 pthread_attr_t attr; 864 child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get(); 865 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); 866 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), 867 "PTHREAD_CREATE_DETACHED"); 868 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); 869 pthread_create_result = pthread_create(&new_pthread, 870 &attr, 871 Thread::CreateCallback, 872 child_thread); 873 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); 874 875 if (pthread_create_result == 0) { 876 // pthread_create started the new thread. The child is now responsible for managing the 877 // JNIEnvExt we created. 878 // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization 879 // between the threads. 880 child_jni_env_ext.release(); // NOLINT pthreads API. 881 return; 882 } 883 } 884 885 // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up. 
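  // First undo StartThreadBirth() so a concurrent runtime shutdown is not left
  // waiting for a child thread that will never register itself.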
886 { 887 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 888 runtime->EndThreadBirth(); 889 } 890 // Manually delete the global reference since Thread::Init will not have been run. 891 env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer); 892 child_thread->tlsPtr_.jpeer = nullptr; 893 delete child_thread; 894 child_thread = nullptr; 895 // TODO: remove from thread group? 896 env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0); 897 { 898 std::string msg(child_jni_env_ext.get() == nullptr ? 899 StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) : 900 StringPrintf("pthread_create (%s stack) failed: %s", 901 PrettySize(stack_size).c_str(), strerror(pthread_create_result))); 902 ScopedObjectAccess soa(env); 903 soa.Self()->ThrowOutOfMemoryError(msg.c_str()); 904 } 905 } 906 907 bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) { 908 // This function does all the initialization that must be run by the native thread it applies to. 909 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so 910 // we can handshake with the corresponding native thread when it's ready.) Check this native 911 // thread hasn't been through here already... 912 CHECK(Thread::Current() == nullptr); 913 914 // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this 915 // avoids pthread_self_ ever being invalid when discovered from Thread::Current(). 916 tlsPtr_.pthread_self = pthread_self(); 917 CHECK(is_started_); 918 919 ScopedTrace trace("Thread::Init"); 920 921 SetUpAlternateSignalStack(); 922 if (!InitStackHwm()) { 923 return false; 924 } 925 InitCpu(); 926 InitTlsEntryPoints(); 927 RemoveSuspendTrigger(); 928 InitCardTable(); 929 InitTid(); 930 { 931 ScopedTrace trace2("InitInterpreterTls"); 932 interpreter::InitInterpreterTls(this); 933 } 934 935 #ifdef ART_TARGET_ANDROID 936 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this; 937 #else 938 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self"); 939 #endif 940 DCHECK_EQ(Thread::Current(), this); 941 942 tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this); 943 944 if (jni_env_ext != nullptr) { 945 DCHECK_EQ(jni_env_ext->GetVm(), java_vm); 946 DCHECK_EQ(jni_env_ext->GetSelf(), this); 947 tlsPtr_.jni_env = jni_env_ext; 948 } else { 949 std::string error_msg; 950 tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg); 951 if (tlsPtr_.jni_env == nullptr) { 952 LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg; 953 return false; 954 } 955 } 956 957 ScopedTrace trace3("ThreadList::Register"); 958 thread_list->Register(this); 959 return true; 960 } 961 962 template <typename PeerAction> 963 Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) { 964 Runtime* runtime = Runtime::Current(); 965 ScopedTrace trace("Thread::Attach"); 966 if (runtime == nullptr) { 967 LOG(ERROR) << "Thread attaching to non-existent runtime: " << 968 ((thread_name != nullptr) ? thread_name : "(Unnamed)"); 969 return nullptr; 970 } 971 Thread* self; 972 { 973 ScopedTrace trace2("Thread birth"); 974 MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); 975 if (runtime->IsShuttingDownLocked()) { 976 LOG(WARNING) << "Thread attaching while runtime is shutting down: " << 977 ((thread_name != nullptr) ? 
thread_name : "(Unnamed)"); 978 return nullptr; 979 } else { 980 Runtime::Current()->StartThreadBirth(); 981 self = new Thread(as_daemon); 982 bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM()); 983 Runtime::Current()->EndThreadBirth(); 984 if (!init_success) { 985 delete self; 986 return nullptr; 987 } 988 } 989 } 990 991 self->InitStringEntryPoints(); 992 993 CHECK_NE(self->GetState(), kRunnable); 994 self->SetState(kNative); 995 996 // Run the action that is acting on the peer. 997 if (!peer_action(self)) { 998 runtime->GetThreadList()->Unregister(self); 999 // Unregister deletes self, no need to do this here. 1000 return nullptr; 1001 } 1002 1003 if (VLOG_IS_ON(threads)) { 1004 if (thread_name != nullptr) { 1005 VLOG(threads) << "Attaching thread " << thread_name; 1006 } else { 1007 VLOG(threads) << "Attaching unnamed thread."; 1008 } 1009 ScopedObjectAccess soa(self); 1010 self->Dump(LOG_STREAM(INFO)); 1011 } 1012 1013 { 1014 ScopedObjectAccess soa(self); 1015 runtime->GetRuntimeCallbacks()->ThreadStart(self); 1016 } 1017 1018 return self; 1019 } 1020 1021 Thread* Thread::Attach(const char* thread_name, 1022 bool as_daemon, 1023 jobject thread_group, 1024 bool create_peer) { 1025 auto create_peer_action = [&](Thread* self) { 1026 // If we're the main thread, ClassLinker won't be created until after we're attached, 1027 // so that thread needs a two-stage attach. Regular threads don't need this hack. 1028 // In the compiler, all threads need this hack, because no-one's going to be getting 1029 // a native peer! 1030 if (create_peer) { 1031 self->CreatePeer(thread_name, as_daemon, thread_group); 1032 if (self->IsExceptionPending()) { 1033 // We cannot keep the exception around, as we're deleting self. Try to be helpful and log 1034 // it. 1035 { 1036 ScopedObjectAccess soa(self); 1037 LOG(ERROR) << "Exception creating thread peer:"; 1038 LOG(ERROR) << self->GetException()->Dump(); 1039 self->ClearException(); 1040 } 1041 return false; 1042 } 1043 } else { 1044 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools. 1045 if (thread_name != nullptr) { 1046 self->tlsPtr_.name->assign(thread_name); 1047 ::art::SetThreadName(thread_name); 1048 } else if (self->GetJniEnv()->IsCheckJniEnabled()) { 1049 LOG(WARNING) << *Thread::Current() << " attached without supplying a name"; 1050 } 1051 } 1052 return true; 1053 }; 1054 return Attach(thread_name, as_daemon, create_peer_action); 1055 } 1056 1057 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) { 1058 auto set_peer_action = [&](Thread* self) { 1059 // Install the given peer. 
1060 { 1061 DCHECK(self == Thread::Current()); 1062 ScopedObjectAccess soa(self); 1063 self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr(); 1064 } 1065 self->GetJniEnv()->SetLongField(thread_peer, 1066 WellKnownClasses::java_lang_Thread_nativePeer, 1067 reinterpret_cast64<jlong>(self)); 1068 return true; 1069 }; 1070 return Attach(thread_name, as_daemon, set_peer_action); 1071 } 1072 1073 void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) { 1074 Runtime* runtime = Runtime::Current(); 1075 CHECK(runtime->IsStarted()); 1076 JNIEnv* env = tlsPtr_.jni_env; 1077 1078 if (thread_group == nullptr) { 1079 thread_group = runtime->GetMainThreadGroup(); 1080 } 1081 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 1082 // Add missing null check in case of OOM b/18297817 1083 if (name != nullptr && thread_name.get() == nullptr) { 1084 CHECK(IsExceptionPending()); 1085 return; 1086 } 1087 jint thread_priority = GetNativePriority(); 1088 jboolean thread_is_daemon = as_daemon; 1089 1090 ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); 1091 if (peer.get() == nullptr) { 1092 CHECK(IsExceptionPending()); 1093 return; 1094 } 1095 { 1096 ScopedObjectAccess soa(this); 1097 tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr(); 1098 } 1099 env->CallNonvirtualVoidMethod(peer.get(), 1100 WellKnownClasses::java_lang_Thread, 1101 WellKnownClasses::java_lang_Thread_init, 1102 thread_group, thread_name.get(), thread_priority, thread_is_daemon); 1103 if (IsExceptionPending()) { 1104 return; 1105 } 1106 1107 Thread* self = this; 1108 DCHECK_EQ(self, Thread::Current()); 1109 env->SetLongField(peer.get(), 1110 WellKnownClasses::java_lang_Thread_nativePeer, 1111 reinterpret_cast64<jlong>(self)); 1112 1113 ScopedObjectAccess soa(self); 1114 StackHandleScope<1> hs(self); 1115 MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName())); 1116 if (peer_thread_name == nullptr) { 1117 // The Thread constructor should have set the Thread.name to a 1118 // non-null value. However, because we can run without code 1119 // available (in the compiler, in tests), we manually assign the 1120 // fields the constructor should have set. 1121 if (runtime->IsActiveTransaction()) { 1122 InitPeer<true>(soa, 1123 tlsPtr_.opeer, 1124 thread_is_daemon, 1125 thread_group, 1126 thread_name.get(), 1127 thread_priority); 1128 } else { 1129 InitPeer<false>(soa, 1130 tlsPtr_.opeer, 1131 thread_is_daemon, 1132 thread_group, 1133 thread_name.get(), 1134 thread_priority); 1135 } 1136 peer_thread_name.Assign(GetThreadName()); 1137 } 1138 // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. 1139 if (peer_thread_name != nullptr) { 1140 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str()); 1141 } 1142 } 1143 1144 jobject Thread::CreateCompileTimePeer(JNIEnv* env, 1145 const char* name, 1146 bool as_daemon, 1147 jobject thread_group) { 1148 Runtime* runtime = Runtime::Current(); 1149 CHECK(!runtime->IsStarted()); 1150 1151 if (thread_group == nullptr) { 1152 thread_group = runtime->GetMainThreadGroup(); 1153 } 1154 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 1155 // Add missing null check in case of OOM b/18297817 1156 if (name != nullptr && thread_name.get() == nullptr) { 1157 CHECK(Thread::Current()->IsExceptionPending()); 1158 return nullptr; 1159 } 1160 jint thread_priority = kNormThreadPriority; // Always normalize to NORM priority. 
1161 jboolean thread_is_daemon = as_daemon; 1162 1163 ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); 1164 if (peer.get() == nullptr) { 1165 CHECK(Thread::Current()->IsExceptionPending()); 1166 return nullptr; 1167 } 1168 1169 // We cannot call Thread.init, as it will recursively ask for currentThread. 1170 1171 // The Thread constructor should have set the Thread.name to a 1172 // non-null value. However, because we can run without code 1173 // available (in the compiler, in tests), we manually assign the 1174 // fields the constructor should have set. 1175 ScopedObjectAccessUnchecked soa(Thread::Current()); 1176 if (runtime->IsActiveTransaction()) { 1177 InitPeer<true>(soa, 1178 soa.Decode<mirror::Object>(peer.get()), 1179 thread_is_daemon, 1180 thread_group, 1181 thread_name.get(), 1182 thread_priority); 1183 } else { 1184 InitPeer<false>(soa, 1185 soa.Decode<mirror::Object>(peer.get()), 1186 thread_is_daemon, 1187 thread_group, 1188 thread_name.get(), 1189 thread_priority); 1190 } 1191 1192 return peer.release(); 1193 } 1194 1195 template<bool kTransactionActive> 1196 void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa, 1197 ObjPtr<mirror::Object> peer, 1198 jboolean thread_is_daemon, 1199 jobject thread_group, 1200 jobject thread_name, 1201 jint thread_priority) { 1202 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)-> 1203 SetBoolean<kTransactionActive>(peer, thread_is_daemon); 1204 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)-> 1205 SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group)); 1206 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)-> 1207 SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name)); 1208 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)-> 1209 SetInt<kTransactionActive>(peer, thread_priority); 1210 } 1211 1212 void Thread::SetThreadName(const char* name) { 1213 tlsPtr_.name->assign(name); 1214 ::art::SetThreadName(name); 1215 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM")); 1216 } 1217 1218 static void GetThreadStack(pthread_t thread, 1219 void** stack_base, 1220 size_t* stack_size, 1221 size_t* guard_size) { 1222 #if defined(__APPLE__) 1223 *stack_size = pthread_get_stacksize_np(thread); 1224 void* stack_addr = pthread_get_stackaddr_np(thread); 1225 1226 // Check whether stack_addr is the base or end of the stack. 1227 // (On Mac OS 10.7, it's the end.) 1228 int stack_variable; 1229 if (stack_addr > &stack_variable) { 1230 *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size; 1231 } else { 1232 *stack_base = stack_addr; 1233 } 1234 1235 // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac. 
1236 pthread_attr_t attributes; 1237 CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__); 1238 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__); 1239 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__); 1240 #else 1241 pthread_attr_t attributes; 1242 CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__); 1243 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__); 1244 CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__); 1245 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__); 1246 1247 #if defined(__GLIBC__) 1248 // If we're the main thread, check whether we were run with an unlimited stack. In that case, 1249 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection 1250 // will be broken because we'll die long before we get close to 2GB. 1251 bool is_main_thread = (::art::GetTid() == getpid()); 1252 if (is_main_thread) { 1253 rlimit stack_limit; 1254 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) { 1255 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed"; 1256 } 1257 if (stack_limit.rlim_cur == RLIM_INFINITY) { 1258 size_t old_stack_size = *stack_size; 1259 1260 // Use the kernel default limit as our size, and adjust the base to match. 1261 *stack_size = 8 * MB; 1262 *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size); 1263 1264 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")" 1265 << " to " << PrettySize(*stack_size) 1266 << " with base " << *stack_base; 1267 } 1268 } 1269 #endif 1270 1271 #endif 1272 } 1273 1274 bool Thread::InitStackHwm() { 1275 ScopedTrace trace("InitStackHwm"); 1276 void* read_stack_base; 1277 size_t read_stack_size; 1278 size_t read_guard_size; 1279 GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size); 1280 1281 tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base); 1282 tlsPtr_.stack_size = read_stack_size; 1283 1284 // The minimum stack size we can cope with is the overflow reserved bytes (typically 1285 // 8K) + the protected region size (4K) + another page (4K). Typically this will 1286 // be 8+4+4 = 16K. The thread won't be able to do much with this stack even the GC takes 1287 // between 8K and 12K. 1288 uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize 1289 + 4 * KB; 1290 if (read_stack_size <= min_stack) { 1291 // Note, as we know the stack is small, avoid operations that could use a lot of stack. 1292 LogHelper::LogLineLowStack(__PRETTY_FUNCTION__, 1293 __LINE__, 1294 ::android::base::ERROR, 1295 "Attempt to attach a thread with a too-small stack"); 1296 return false; 1297 } 1298 1299 // This is included in the SIGQUIT output, but it's useful here for thread debugging. 1300 VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)", 1301 read_stack_base, 1302 PrettySize(read_stack_size).c_str(), 1303 PrettySize(read_guard_size).c_str()); 1304 1305 // Set stack_end_ to the bottom of the stack saving space of stack overflows 1306 1307 Runtime* runtime = Runtime::Current(); 1308 bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler(); 1309 1310 ResetDefaultStackEnd(); 1311 1312 // Install the protected region if we are doing implicit overflow checks. 
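  // Note: the adjustment below moves both stack_begin and stack_end up by
  // read_guard_size + kStackOverflowProtectedSize so that
  // InstallImplicitProtection() can mprotect the region just below the new
  // stack_begin, while the usable stack_size only shrinks by read_guard_size.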
1313 if (implicit_stack_check) { 1314 // The thread might have protected region at the bottom. We need 1315 // to install our own region so we need to move the limits 1316 // of the stack to make room for it. 1317 1318 tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize; 1319 tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize; 1320 tlsPtr_.stack_size -= read_guard_size; 1321 1322 InstallImplicitProtection(); 1323 } 1324 1325 // Sanity check. 1326 CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end)); 1327 1328 return true; 1329 } 1330 1331 void Thread::ShortDump(std::ostream& os) const { 1332 os << "Thread["; 1333 if (GetThreadId() != 0) { 1334 // If we're in kStarting, we won't have a thin lock id or tid yet. 1335 os << GetThreadId() 1336 << ",tid=" << GetTid() << ','; 1337 } 1338 os << GetState() 1339 << ",Thread*=" << this 1340 << ",peer=" << tlsPtr_.opeer 1341 << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\"" 1342 << "]"; 1343 } 1344 1345 void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map, 1346 bool force_dump_stack) const { 1347 DumpState(os); 1348 DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack); 1349 } 1350 1351 ObjPtr<mirror::String> Thread::GetThreadName() const { 1352 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); 1353 if (tlsPtr_.opeer == nullptr) { 1354 return nullptr; 1355 } 1356 ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer); 1357 return name == nullptr ? nullptr : name->AsString(); 1358 } 1359 1360 void Thread::GetThreadName(std::string& name) const { 1361 name.assign(*tlsPtr_.name); 1362 } 1363 1364 uint64_t Thread::GetCpuMicroTime() const { 1365 #if defined(__linux__) 1366 clockid_t cpu_clock_id; 1367 pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id); 1368 timespec now; 1369 clock_gettime(cpu_clock_id, &now); 1370 return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000); 1371 #else // __APPLE__ 1372 UNIMPLEMENTED(WARNING); 1373 return -1; 1374 #endif 1375 } 1376 1377 // Attempt to rectify locks so that we dump thread list with required locks before exiting. 
1378 static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 1379 LOG(ERROR) << *thread << " suspend count already zero."; 1380 Locks::thread_suspend_count_lock_->Unlock(self); 1381 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 1382 Locks::mutator_lock_->SharedTryLock(self); 1383 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 1384 LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; 1385 } 1386 } 1387 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 1388 Locks::thread_list_lock_->TryLock(self); 1389 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 1390 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; 1391 } 1392 } 1393 std::ostringstream ss; 1394 Runtime::Current()->GetThreadList()->Dump(ss); 1395 LOG(FATAL) << ss.str(); 1396 } 1397 1398 bool Thread::ModifySuspendCountInternal(Thread* self, 1399 int delta, 1400 AtomicInteger* suspend_barrier, 1401 SuspendReason reason) { 1402 if (kIsDebugBuild) { 1403 DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count) 1404 << reason << " " << delta << " " << tls32_.debug_suspend_count << " " << this; 1405 DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this; 1406 Locks::thread_suspend_count_lock_->AssertHeld(self); 1407 if (this != self && !IsSuspended()) { 1408 Locks::thread_list_lock_->AssertHeld(self); 1409 } 1410 } 1411 // User code suspensions need to be checked more closely since they originate from code outside of 1412 // the runtime's control. 1413 if (UNLIKELY(reason == SuspendReason::kForUserCode)) { 1414 Locks::user_code_suspension_lock_->AssertHeld(self); 1415 if (UNLIKELY(delta + tls32_.user_code_suspend_count < 0)) { 1416 LOG(ERROR) << "attempting to modify suspend count in an illegal way."; 1417 return false; 1418 } 1419 } 1420 if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) { 1421 UnsafeLogFatalForSuspendCount(self, this); 1422 return false; 1423 } 1424 1425 if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) { 1426 // Force retry of a suspend request if it's in the middle of a thread flip to avoid a 1427 // deadlock. b/31683379. 1428 return false; 1429 } 1430 1431 uint16_t flags = kSuspendRequest; 1432 if (delta > 0 && suspend_barrier != nullptr) { 1433 uint32_t available_barrier = kMaxSuspendBarriers; 1434 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1435 if (tlsPtr_.active_suspend_barriers[i] == nullptr) { 1436 available_barrier = i; 1437 break; 1438 } 1439 } 1440 if (available_barrier == kMaxSuspendBarriers) { 1441 // No barrier spaces available, we can't add another. 1442 return false; 1443 } 1444 tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier; 1445 flags |= kActiveSuspendBarrier; 1446 } 1447 1448 tls32_.suspend_count += delta; 1449 switch (reason) { 1450 case SuspendReason::kForDebugger: 1451 tls32_.debug_suspend_count += delta; 1452 break; 1453 case SuspendReason::kForUserCode: 1454 tls32_.user_code_suspend_count += delta; 1455 break; 1456 case SuspendReason::kInternal: 1457 break; 1458 } 1459 1460 if (tls32_.suspend_count == 0) { 1461 AtomicClearFlag(kSuspendRequest); 1462 } else { 1463 // Two bits might be set simultaneously. 1464 tls32_.state_and_flags.as_atomic_int.fetch_or(flags, std::memory_order_seq_cst); 1465 TriggerSuspend(); 1466 } 1467 return true; 1468 } 1469 1470 bool Thread::PassActiveSuspendBarriers(Thread* self) { 1471 // Grab the suspend_count lock and copy the current set of 1472 // barriers. 
Then clear the list and the flag. The ModifySuspendCount 1473 // function requires the lock so we prevent a race between setting 1474 // the kActiveSuspendBarrier flag and clearing it. 1475 AtomicInteger* pass_barriers[kMaxSuspendBarriers]; 1476 { 1477 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1478 if (!ReadFlag(kActiveSuspendBarrier)) { 1479 // quick exit test: the barriers have already been claimed - this is 1480 // possible as there may be a race to claim and it doesn't matter 1481 // who wins. 1482 // All of the callers of this function (except the SuspendAllInternal) 1483 // will first test the kActiveSuspendBarrier flag without lock. Here 1484 // double-check whether the barrier has been passed with the 1485 // suspend_count lock. 1486 return false; 1487 } 1488 1489 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1490 pass_barriers[i] = tlsPtr_.active_suspend_barriers[i]; 1491 tlsPtr_.active_suspend_barriers[i] = nullptr; 1492 } 1493 AtomicClearFlag(kActiveSuspendBarrier); 1494 } 1495 1496 uint32_t barrier_count = 0; 1497 for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) { 1498 AtomicInteger* pending_threads = pass_barriers[i]; 1499 if (pending_threads != nullptr) { 1500 bool done = false; 1501 do { 1502 int32_t cur_val = pending_threads->load(std::memory_order_relaxed); 1503 CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val; 1504 // Reduce value by 1. 1505 done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1); 1506 #if ART_USE_FUTEXES 1507 if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously. 1508 futex(pending_threads->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0); 1509 } 1510 #endif 1511 } while (!done); 1512 ++barrier_count; 1513 } 1514 } 1515 CHECK_GT(barrier_count, 0U); 1516 return true; 1517 } 1518 1519 void Thread::ClearSuspendBarrier(AtomicInteger* target) { 1520 CHECK(ReadFlag(kActiveSuspendBarrier)); 1521 bool clear_flag = true; 1522 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1523 AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i]; 1524 if (ptr == target) { 1525 tlsPtr_.active_suspend_barriers[i] = nullptr; 1526 } else if (ptr != nullptr) { 1527 clear_flag = false; 1528 } 1529 } 1530 if (LIKELY(clear_flag)) { 1531 AtomicClearFlag(kActiveSuspendBarrier); 1532 } 1533 } 1534 1535 void Thread::RunCheckpointFunction() { 1536 // Grab the suspend_count lock, get the next checkpoint and update all the checkpoint fields. If 1537 // there are no more checkpoints we will also clear the kCheckpointRequest flag. 1538 Closure* checkpoint; 1539 { 1540 MutexLock mu(this, *Locks::thread_suspend_count_lock_); 1541 checkpoint = tlsPtr_.checkpoint_function; 1542 if (!checkpoint_overflow_.empty()) { 1543 // Overflow list not empty, copy the first one out and continue. 1544 tlsPtr_.checkpoint_function = checkpoint_overflow_.front(); 1545 checkpoint_overflow_.pop_front(); 1546 } else { 1547 // No overflow checkpoints. Clear the kCheckpointRequest flag 1548 tlsPtr_.checkpoint_function = nullptr; 1549 AtomicClearFlag(kCheckpointRequest); 1550 } 1551 } 1552 // Outside the lock, run the checkpoint function. 
1553 ScopedTrace trace("Run checkpoint function"); 1554 CHECK(checkpoint != nullptr) << "Checkpoint flag set without pending checkpoint"; 1555 checkpoint->Run(this); 1556 } 1557 1558 void Thread::RunEmptyCheckpoint() { 1559 DCHECK_EQ(Thread::Current(), this); 1560 AtomicClearFlag(kEmptyCheckpointRequest); 1561 Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this); 1562 } 1563 1564 bool Thread::RequestCheckpoint(Closure* function) { 1565 union StateAndFlags old_state_and_flags; 1566 old_state_and_flags.as_int = tls32_.state_and_flags.as_int; 1567 if (old_state_and_flags.as_struct.state != kRunnable) { 1568 return false; // Fail, thread is suspended and so can't run a checkpoint. 1569 } 1570 1571 // We must be runnable to request a checkpoint. 1572 DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable); 1573 union StateAndFlags new_state_and_flags; 1574 new_state_and_flags.as_int = old_state_and_flags.as_int; 1575 new_state_and_flags.as_struct.flags |= kCheckpointRequest; 1576 bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent( 1577 old_state_and_flags.as_int, new_state_and_flags.as_int); 1578 if (success) { 1579 // Succeeded setting checkpoint flag, now insert the actual checkpoint. 1580 if (tlsPtr_.checkpoint_function == nullptr) { 1581 tlsPtr_.checkpoint_function = function; 1582 } else { 1583 checkpoint_overflow_.push_back(function); 1584 } 1585 CHECK_EQ(ReadFlag(kCheckpointRequest), true); 1586 TriggerSuspend(); 1587 } 1588 return success; 1589 } 1590 1591 bool Thread::RequestEmptyCheckpoint() { 1592 union StateAndFlags old_state_and_flags; 1593 old_state_and_flags.as_int = tls32_.state_and_flags.as_int; 1594 if (old_state_and_flags.as_struct.state != kRunnable) { 1595 // If it's not runnable, we don't need to do anything because it won't be in the middle of a 1596 // heap access (eg. the read barrier). 1597 return false; 1598 } 1599 1600 // We must be runnable to request a checkpoint. 1601 DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable); 1602 union StateAndFlags new_state_and_flags; 1603 new_state_and_flags.as_int = old_state_and_flags.as_int; 1604 new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest; 1605 bool success = tls32_.state_and_flags.as_atomic_int.CompareAndSetStrongSequentiallyConsistent( 1606 old_state_and_flags.as_int, new_state_and_flags.as_int); 1607 if (success) { 1608 TriggerSuspend(); 1609 } 1610 return success; 1611 } 1612 1613 class BarrierClosure : public Closure { 1614 public: 1615 explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {} 1616 1617 void Run(Thread* self) override { 1618 wrapped_->Run(self); 1619 barrier_.Pass(self); 1620 } 1621 1622 void Wait(Thread* self, ThreadState suspend_state) { 1623 if (suspend_state != ThreadState::kRunnable) { 1624 barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1); 1625 } else { 1626 barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1); 1627 } 1628 } 1629 1630 private: 1631 Closure* wrapped_; 1632 Barrier barrier_; 1633 }; 1634 1635 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. 1636 bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend_state) { 1637 Thread* self = Thread::Current(); 1638 if (this == Thread::Current()) { 1639 Locks::thread_list_lock_->AssertExclusiveHeld(self); 1640 // Unlock the tll before running so that the state is the same regardless of thread. 
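  // ("tll" above refers to Locks::thread_list_lock_.) Callers enter with that
  // lock held exclusively; it is released on every path out of this function,
  // including this self-execution fast path.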
1641 Locks::thread_list_lock_->ExclusiveUnlock(self); 1642 // Asked to run on this thread. Just run. 1643 function->Run(this); 1644 return true; 1645 } 1646 1647 // The current thread is not this thread. 1648 1649 if (GetState() == ThreadState::kTerminated) { 1650 Locks::thread_list_lock_->ExclusiveUnlock(self); 1651 return false; 1652 } 1653 1654 struct ScopedThreadListLockUnlock { 1655 explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_) 1656 : self_thread(self_in) { 1657 Locks::thread_list_lock_->AssertHeld(self_thread); 1658 Locks::thread_list_lock_->Unlock(self_thread); 1659 } 1660 1661 ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) { 1662 Locks::thread_list_lock_->AssertNotHeld(self_thread); 1663 Locks::thread_list_lock_->Lock(self_thread); 1664 } 1665 1666 Thread* self_thread; 1667 }; 1668 1669 for (;;) { 1670 Locks::thread_list_lock_->AssertExclusiveHeld(self); 1671 // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the 1672 // suspend-count lock for too long. 1673 if (GetState() == ThreadState::kRunnable) { 1674 BarrierClosure barrier_closure(function); 1675 bool installed = false; 1676 { 1677 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1678 installed = RequestCheckpoint(&barrier_closure); 1679 } 1680 if (installed) { 1681 // Relinquish the thread-list lock. We should not wait holding any locks. We cannot 1682 // reacquire it since we don't know if 'this' hasn't been deleted yet. 1683 Locks::thread_list_lock_->ExclusiveUnlock(self); 1684 ScopedThreadStateChange sts(self, suspend_state); 1685 barrier_closure.Wait(self, suspend_state); 1686 return true; 1687 } 1688 // Fall-through. 1689 } 1690 1691 // This thread is not runnable, make sure we stay suspended, then run the checkpoint. 1692 // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in 1693 // certain situations. 1694 { 1695 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1696 1697 if (!ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal)) { 1698 // Just retry the loop. 1699 sched_yield(); 1700 continue; 1701 } 1702 } 1703 1704 { 1705 // Release for the wait. The suspension will keep us from being deleted. Reacquire after so 1706 // that we can call ModifySuspendCount without racing against ThreadList::Unregister. 1707 ScopedThreadListLockUnlock stllu(self); 1708 { 1709 ScopedThreadStateChange sts(self, suspend_state); 1710 while (GetState() == ThreadState::kRunnable) { 1711 // We became runnable again. Wait till the suspend triggered in ModifySuspendCount 1712 // moves us to suspended. 1713 sched_yield(); 1714 } 1715 } 1716 1717 function->Run(this); 1718 } 1719 1720 { 1721 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1722 1723 DCHECK_NE(GetState(), ThreadState::kRunnable); 1724 bool updated = ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal); 1725 DCHECK(updated); 1726 } 1727 1728 { 1729 // Imitate ResumeAll, the thread may be waiting on Thread::resume_cond_ since we raised its 1730 // suspend count. Now the suspend_count_ is lowered so we must do the broadcast. 1731 MutexLock mu2(self, *Locks::thread_suspend_count_lock_); 1732 Thread::resume_cond_->Broadcast(self); 1733 } 1734 1735 // Release the thread_list_lock_ to be consistent with the barrier-closure path. 1736 Locks::thread_list_lock_->ExclusiveUnlock(self); 1737 1738 return true; // We're done, break out of the loop. 
1739 } 1740 } 1741 1742 Closure* Thread::GetFlipFunction() { 1743 Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); 1744 Closure* func; 1745 do { 1746 func = atomic_func->load(std::memory_order_relaxed); 1747 if (func == nullptr) { 1748 return nullptr; 1749 } 1750 } while (!atomic_func->CompareAndSetWeakSequentiallyConsistent(func, nullptr)); 1751 DCHECK(func != nullptr); 1752 return func; 1753 } 1754 1755 void Thread::SetFlipFunction(Closure* function) { 1756 CHECK(function != nullptr); 1757 Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); 1758 atomic_func->store(function, std::memory_order_seq_cst); 1759 } 1760 1761 void Thread::FullSuspendCheck() { 1762 ScopedTrace trace(__FUNCTION__); 1763 VLOG(threads) << this << " self-suspending"; 1764 // Make thread appear suspended to other threads, release mutator_lock_. 1765 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_. 1766 ScopedThreadSuspension(this, kSuspended); // NOLINT 1767 VLOG(threads) << this << " self-reviving"; 1768 } 1769 1770 static std::string GetSchedulerGroupName(pid_t tid) { 1771 // /proc/<pid>/cgroup looks like this: 1772 // 2:devices:/ 1773 // 1:cpuacct,cpu:/ 1774 // We want the third field from the line whose second field contains the "cpu" token. 1775 std::string cgroup_file; 1776 if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) { 1777 return ""; 1778 } 1779 std::vector<std::string> cgroup_lines; 1780 Split(cgroup_file, '\n', &cgroup_lines); 1781 for (size_t i = 0; i < cgroup_lines.size(); ++i) { 1782 std::vector<std::string> cgroup_fields; 1783 Split(cgroup_lines[i], ':', &cgroup_fields); 1784 std::vector<std::string> cgroups; 1785 Split(cgroup_fields[1], ',', &cgroups); 1786 for (size_t j = 0; j < cgroups.size(); ++j) { 1787 if (cgroups[j] == "cpu") { 1788 return cgroup_fields[2].substr(1); // Skip the leading slash. 1789 } 1790 } 1791 } 1792 return ""; 1793 } 1794 1795 1796 void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { 1797 std::string group_name; 1798 int priority; 1799 bool is_daemon = false; 1800 Thread* self = Thread::Current(); 1801 1802 // If flip_function is not null, it means we have run a checkpoint 1803 // before the thread wakes up to execute the flip function and the 1804 // thread roots haven't been forwarded. So the following access to 1805 // the roots (opeer or methods in the frames) would be bad. Run it 1806 // here. TODO: clean up. 1807 if (thread != nullptr) { 1808 ScopedObjectAccessUnchecked soa(self); 1809 Thread* this_thread = const_cast<Thread*>(thread); 1810 Closure* flip_func = this_thread->GetFlipFunction(); 1811 if (flip_func != nullptr) { 1812 flip_func->Run(this_thread); 1813 } 1814 } 1815 1816 // Don't do this if we are aborting since the GC may have all the threads suspended. This will 1817 // cause ScopedObjectAccessUnchecked to deadlock. 
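  // If the branch below is skipped (aborting, no current thread, or no managed
  // peer), we fall back to the native priority and leave group_name empty and
  // is_daemon false.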
1818 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) { 1819 ScopedObjectAccessUnchecked soa(self); 1820 priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority) 1821 ->GetInt(thread->tlsPtr_.opeer); 1822 is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon) 1823 ->GetBoolean(thread->tlsPtr_.opeer); 1824 1825 ObjPtr<mirror::Object> thread_group = 1826 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 1827 ->GetObject(thread->tlsPtr_.opeer); 1828 1829 if (thread_group != nullptr) { 1830 ArtField* group_name_field = 1831 jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); 1832 ObjPtr<mirror::String> group_name_string = 1833 group_name_field->GetObject(thread_group)->AsString(); 1834 group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>"; 1835 } 1836 } else { 1837 priority = GetNativePriority(); 1838 } 1839 1840 std::string scheduler_group_name(GetSchedulerGroupName(tid)); 1841 if (scheduler_group_name.empty()) { 1842 scheduler_group_name = "default"; 1843 } 1844 1845 if (thread != nullptr) { 1846 os << '"' << *thread->tlsPtr_.name << '"'; 1847 if (is_daemon) { 1848 os << " daemon"; 1849 } 1850 os << " prio=" << priority 1851 << " tid=" << thread->GetThreadId() 1852 << " " << thread->GetState(); 1853 if (thread->IsStillStarting()) { 1854 os << " (still starting up)"; 1855 } 1856 os << "\n"; 1857 } else { 1858 os << '"' << ::art::GetThreadName(tid) << '"' 1859 << " prio=" << priority 1860 << " (not attached)\n"; 1861 } 1862 1863 if (thread != nullptr) { 1864 auto suspend_log_fn = [&]() REQUIRES(Locks::thread_suspend_count_lock_) { 1865 os << " | group=\"" << group_name << "\"" 1866 << " sCount=" << thread->tls32_.suspend_count 1867 << " dsCount=" << thread->tls32_.debug_suspend_count 1868 << " flags=" << thread->tls32_.state_and_flags.as_struct.flags 1869 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer) 1870 << " self=" << reinterpret_cast<const void*>(thread) << "\n"; 1871 }; 1872 if (Locks::thread_suspend_count_lock_->IsExclusiveHeld(self)) { 1873 Locks::thread_suspend_count_lock_->AssertExclusiveHeld(self); // For annotalysis. 1874 suspend_log_fn(); 1875 } else { 1876 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1877 suspend_log_fn(); 1878 } 1879 } 1880 1881 os << " | sysTid=" << tid 1882 << " nice=" << getpriority(PRIO_PROCESS, tid) 1883 << " cgrp=" << scheduler_group_name; 1884 if (thread != nullptr) { 1885 int policy; 1886 sched_param sp; 1887 #if !defined(__APPLE__) 1888 // b/36445592 Don't use pthread_getschedparam since pthread may have exited. 1889 policy = sched_getscheduler(tid); 1890 if (policy == -1) { 1891 PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; 1892 } 1893 int sched_getparam_result = sched_getparam(tid, &sp); 1894 if (sched_getparam_result == -1) { 1895 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; 1896 sp.sched_priority = -1; 1897 } 1898 #else 1899 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), 1900 __FUNCTION__); 1901 #endif 1902 os << " sched=" << policy << "/" << sp.sched_priority 1903 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self); 1904 } 1905 os << "\n"; 1906 1907 // Grab the scheduler stats for this thread. 
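  // /proc/self/task/<tid>/schedstat is expected to contain three
  // space-separated counters (roughly: time on cpu, runqueue wait time, and
  // number of timeslices), e.g. "3120243029 451412944 8324" (illustrative
  // values only). If the file is missing or empty we report "0 0 0".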
1908 std::string scheduler_stats; 1909 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats) 1910 && !scheduler_stats.empty()) { 1911 scheduler_stats = android::base::Trim(scheduler_stats); // Lose the trailing '\n'. 1912 } else { 1913 scheduler_stats = "0 0 0"; 1914 } 1915 1916 char native_thread_state = '?'; 1917 int utime = 0; 1918 int stime = 0; 1919 int task_cpu = 0; 1920 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu); 1921 1922 os << " | state=" << native_thread_state 1923 << " schedstat=( " << scheduler_stats << " )" 1924 << " utm=" << utime 1925 << " stm=" << stime 1926 << " core=" << task_cpu 1927 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 1928 if (thread != nullptr) { 1929 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-" 1930 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize=" 1931 << PrettySize(thread->tlsPtr_.stack_size) << "\n"; 1932 // Dump the held mutexes. 1933 os << " | held mutexes="; 1934 for (size_t i = 0; i < kLockLevelCount; ++i) { 1935 if (i != kMonitorLock) { 1936 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i)); 1937 if (mutex != nullptr) { 1938 os << " \"" << mutex->GetName() << "\""; 1939 if (mutex->IsReaderWriterMutex()) { 1940 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex); 1941 if (rw_mutex->GetExclusiveOwnerTid() == tid) { 1942 os << "(exclusive held)"; 1943 } else { 1944 os << "(shared held)"; 1945 } 1946 } 1947 } 1948 } 1949 } 1950 os << "\n"; 1951 } 1952 } 1953 1954 void Thread::DumpState(std::ostream& os) const { 1955 Thread::DumpState(os, this, GetTid()); 1956 } 1957 1958 struct StackDumpVisitor : public MonitorObjectsStackVisitor { 1959 StackDumpVisitor(std::ostream& os_in, 1960 Thread* thread_in, 1961 Context* context, 1962 bool can_allocate, 1963 bool check_suspended = true, 1964 bool dump_locks = true) 1965 REQUIRES_SHARED(Locks::mutator_lock_) 1966 : MonitorObjectsStackVisitor(thread_in, 1967 context, 1968 check_suspended, 1969 can_allocate && dump_locks), 1970 os(os_in), 1971 last_method(nullptr), 1972 last_line_number(0), 1973 repetition_count(0) {} 1974 1975 virtual ~StackDumpVisitor() { 1976 if (frame_count == 0) { 1977 os << " (no managed stack frames)\n"; 1978 } 1979 } 1980 1981 static constexpr size_t kMaxRepetition = 3u; 1982 1983 VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED) 1984 override 1985 REQUIRES_SHARED(Locks::mutator_lock_) { 1986 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1987 ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache(); 1988 int line_number = -1; 1989 if (dex_cache != nullptr) { // be tolerant of bad input 1990 const DexFile* dex_file = dex_cache->GetDexFile(); 1991 line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false)); 1992 } 1993 if (line_number == last_line_number && last_method == m) { 1994 ++repetition_count; 1995 } else { 1996 if (repetition_count >= kMaxRepetition) { 1997 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 1998 } 1999 repetition_count = 0; 2000 last_line_number = line_number; 2001 last_method = m; 2002 } 2003 2004 if (repetition_count >= kMaxRepetition) { 2005 // Skip visiting=printing anything. 2006 return VisitMethodResult::kSkipMethod; 2007 } 2008 2009 os << " at " << m->PrettyMethod(false); 2010 if (m->IsNative()) { 2011 os << "(Native method)"; 2012 } else { 2013 const char* source_file(m->GetDeclaringClassSourceFile()); 2014 os << "(" << (source_file != nullptr ? 
source_file : "unavailable") 2015 << ":" << line_number << ")"; 2016 } 2017 os << "\n"; 2018 // Go and visit locks. 2019 return VisitMethodResult::kContinueMethod; 2020 } 2021 2022 VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override { 2023 return VisitMethodResult::kContinueMethod; 2024 } 2025 2026 void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED) 2027 override 2028 REQUIRES_SHARED(Locks::mutator_lock_) { 2029 PrintObject(obj, " - waiting on ", ThreadList::kInvalidThreadId); 2030 } 2031 void VisitSleepingObject(ObjPtr<mirror::Object> obj) 2032 override 2033 REQUIRES_SHARED(Locks::mutator_lock_) { 2034 PrintObject(obj, " - sleeping on ", ThreadList::kInvalidThreadId); 2035 } 2036 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj, 2037 ThreadState state, 2038 uint32_t owner_tid) 2039 override 2040 REQUIRES_SHARED(Locks::mutator_lock_) { 2041 const char* msg; 2042 switch (state) { 2043 case kBlocked: 2044 msg = " - waiting to lock "; 2045 break; 2046 2047 case kWaitingForLockInflation: 2048 msg = " - waiting for lock inflation of "; 2049 break; 2050 2051 default: 2052 LOG(FATAL) << "Unreachable"; 2053 UNREACHABLE(); 2054 } 2055 PrintObject(obj, msg, owner_tid); 2056 } 2057 void VisitLockedObject(ObjPtr<mirror::Object> obj) 2058 override 2059 REQUIRES_SHARED(Locks::mutator_lock_) { 2060 PrintObject(obj, " - locked ", ThreadList::kInvalidThreadId); 2061 } 2062 2063 void PrintObject(ObjPtr<mirror::Object> obj, 2064 const char* msg, 2065 uint32_t owner_tid) REQUIRES_SHARED(Locks::mutator_lock_) { 2066 if (obj == nullptr) { 2067 os << msg << "an unknown object"; 2068 } else { 2069 if ((obj->GetLockWord(true).GetState() == LockWord::kThinLocked) && 2070 Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) { 2071 // Getting the identity hashcode here would result in lock inflation and suspension of the 2072 // current thread, which isn't safe if this is the only runnable thread. 2073 os << msg << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", 2074 reinterpret_cast<intptr_t>(obj.Ptr()), 2075 obj->PrettyTypeOf().c_str()); 2076 } else { 2077 // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>) 2078 // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread 2079 // suspension and move pretty_object. 2080 const std::string pretty_type(obj->PrettyTypeOf()); 2081 os << msg << StringPrintf("<0x%08x> (a %s)", obj->IdentityHashCode(), pretty_type.c_str()); 2082 } 2083 } 2084 if (owner_tid != ThreadList::kInvalidThreadId) { 2085 os << " held by thread " << owner_tid; 2086 } 2087 os << "\n"; 2088 } 2089 2090 std::ostream& os; 2091 ArtMethod* last_method; 2092 int last_line_number; 2093 size_t repetition_count; 2094 }; 2095 2096 static bool ShouldShowNativeStack(const Thread* thread) 2097 REQUIRES_SHARED(Locks::mutator_lock_) { 2098 ThreadState state = thread->GetState(); 2099 2100 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting. 2101 if (state > kWaiting && state < kStarting) { 2102 return true; 2103 } 2104 2105 // In an Object.wait variant or Thread.sleep? That's not interesting. 2106 if (state == kTimedWaiting || state == kSleeping || state == kWaiting) { 2107 return false; 2108 } 2109 2110 // Threads with no managed stack frames should be shown. 2111 if (!thread->HasManagedStack()) { 2112 return true; 2113 } 2114 2115 // In some other native method? That's interesting. 
2116 // We don't just check kNative because native methods will be in state kSuspended if they're 2117 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the 2118 // thread-startup states if it's early enough in their life cycle (http://b/7432159). 2119 ArtMethod* current_method = thread->GetCurrentMethod(nullptr); 2120 return current_method != nullptr && current_method->IsNative(); 2121 } 2122 2123 void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const { 2124 // If flip_function is not null, it means we have run a checkpoint 2125 // before the thread wakes up to execute the flip function and the 2126 // thread roots haven't been forwarded. So the following access to 2127 // the roots (locks or methods in the frames) would be bad. Run it 2128 // here. TODO: clean up. 2129 { 2130 Thread* this_thread = const_cast<Thread*>(this); 2131 Closure* flip_func = this_thread->GetFlipFunction(); 2132 if (flip_func != nullptr) { 2133 flip_func->Run(this_thread); 2134 } 2135 } 2136 2137 // Dumping the Java stack involves the verifier for locks. The verifier operates under the 2138 // assumption that there is no exception pending on entry. Thus, stash any pending exception. 2139 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended 2140 // thread. 2141 StackHandleScope<1> scope(Thread::Current()); 2142 Handle<mirror::Throwable> exc; 2143 bool have_exception = false; 2144 if (IsExceptionPending()) { 2145 exc = scope.NewHandle(GetException()); 2146 const_cast<Thread*>(this)->ClearException(); 2147 have_exception = true; 2148 } 2149 2150 std::unique_ptr<Context> context(Context::Create()); 2151 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), 2152 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks); 2153 dumper.WalkStack(); 2154 2155 if (have_exception) { 2156 const_cast<Thread*>(this)->SetException(exc.Get()); 2157 } 2158 } 2159 2160 void Thread::DumpStack(std::ostream& os, 2161 bool dump_native_stack, 2162 BacktraceMap* backtrace_map, 2163 bool force_dump_stack) const { 2164 // TODO: we call this code when dying but may not have suspended the thread ourself. The 2165 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit 2166 // the race with the thread_suspend_count_lock_). 2167 bool dump_for_abort = (gAborting > 0); 2168 bool safe_to_dump = (this == Thread::Current() || IsSuspended()); 2169 if (!kIsDebugBuild) { 2170 // We always want to dump the stack for an abort, however, there is no point dumping another 2171 // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. 2172 safe_to_dump = (safe_to_dump || dump_for_abort); 2173 } 2174 if (safe_to_dump || force_dump_stack) { 2175 // If we're currently in native code, dump that stack before dumping the managed stack. 
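    // The native dump below prints the kernel stack (prefixed " kernel: ") and
    // the unwound user-space frames (prefixed " native: "), passing the
    // current ArtMethod, when it can be determined, to help annotate frames
    // that fall in compiled managed code.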
2176 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { 2177 DumpKernelStack(os, GetTid(), " kernel: ", false); 2178 ArtMethod* method = 2179 GetCurrentMethod(nullptr, 2180 /*check_suspended=*/ !force_dump_stack, 2181 /*abort_on_error=*/ !(dump_for_abort || force_dump_stack)); 2182 DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); 2183 } 2184 DumpJavaStack(os, 2185 /*check_suspended=*/ !force_dump_stack, 2186 /*dump_locks=*/ !force_dump_stack); 2187 } else { 2188 os << "Not able to dump stack of thread that isn't suspended"; 2189 } 2190 } 2191 2192 void Thread::ThreadExitCallback(void* arg) { 2193 Thread* self = reinterpret_cast<Thread*>(arg); 2194 if (self->tls32_.thread_exit_check_count == 0) { 2195 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " 2196 "going to use a pthread_key_create destructor?): " << *self; 2197 CHECK(is_started_); 2198 #ifdef ART_TARGET_ANDROID 2199 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; 2200 #else 2201 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); 2202 #endif 2203 self->tls32_.thread_exit_check_count = 1; 2204 } else { 2205 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 2206 } 2207 } 2208 2209 void Thread::Startup() { 2210 CHECK(!is_started_); 2211 is_started_ = true; 2212 { 2213 // MutexLock to keep annotalysis happy. 2214 // 2215 // Note we use null for the thread because Thread::Current can 2216 // return garbage since (is_started_ == true) and 2217 // Thread::pthread_key_self_ is not yet initialized. 2218 // This was seen on glibc. 2219 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_); 2220 resume_cond_ = new ConditionVariable("Thread resumption condition variable", 2221 *Locks::thread_suspend_count_lock_); 2222 } 2223 2224 // Allocate a TLS slot. 2225 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), 2226 "self key"); 2227 2228 // Double-check the TLS slot allocation. 2229 if (pthread_getspecific(pthread_key_self_) != nullptr) { 2230 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr"; 2231 } 2232 } 2233 2234 void Thread::FinishStartup() { 2235 Runtime* runtime = Runtime::Current(); 2236 CHECK(runtime->IsStarted()); 2237 2238 // Finish attaching the main thread. 2239 ScopedObjectAccess soa(Thread::Current()); 2240 soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup()); 2241 soa.Self()->AssertNoPendingException(); 2242 2243 runtime->RunRootClinits(soa.Self()); 2244 2245 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular 2246 // threads, this is done in Thread.start() on the Java side. 
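  // NotifyThreadGroup() below performs the equivalent of the Java-side
  // group.add(thread) call for the main thread, via JNI.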
2247 soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup()); 2248 soa.Self()->AssertNoPendingException(); 2249 } 2250 2251 void Thread::Shutdown() { 2252 CHECK(is_started_); 2253 is_started_ = false; 2254 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 2255 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 2256 if (resume_cond_ != nullptr) { 2257 delete resume_cond_; 2258 resume_cond_ = nullptr; 2259 } 2260 } 2261 2262 void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) { 2263 ScopedLocalRef<jobject> thread_jobject( 2264 soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer())); 2265 ScopedLocalRef<jobject> thread_group_jobject_scoped( 2266 soa.Env(), nullptr); 2267 jobject thread_group_jobject = thread_group; 2268 if (thread_group == nullptr || kIsDebugBuild) { 2269 // There is always a group set. Retrieve it. 2270 thread_group_jobject_scoped.reset( 2271 soa.Env()->GetObjectField(thread_jobject.get(), 2272 WellKnownClasses::java_lang_Thread_group)); 2273 thread_group_jobject = thread_group_jobject_scoped.get(); 2274 if (kIsDebugBuild && thread_group != nullptr) { 2275 CHECK(soa.Env()->IsSameObject(thread_group, thread_group_jobject)); 2276 } 2277 } 2278 soa.Env()->CallNonvirtualVoidMethod(thread_group_jobject, 2279 WellKnownClasses::java_lang_ThreadGroup, 2280 WellKnownClasses::java_lang_ThreadGroup_add, 2281 thread_jobject.get()); 2282 } 2283 2284 Thread::Thread(bool daemon) 2285 : tls32_(daemon), 2286 wait_monitor_(nullptr), 2287 is_runtime_thread_(false) { 2288 wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock); 2289 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_); 2290 tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>; 2291 tlsPtr_.name = new std::string(kThreadNameDuringStartup); 2292 2293 static_assert((sizeof(Thread) % 4) == 0U, 2294 "art::Thread has a size which is not a multiple of 4."); 2295 tls32_.state_and_flags.as_struct.flags = 0; 2296 tls32_.state_and_flags.as_struct.state = kNative; 2297 tls32_.interrupted.store(false, std::memory_order_relaxed); 2298 // Initialize with no permit; if the java Thread was unparked before being 2299 // started, it will unpark itself before calling into java code. 2300 tls32_.park_state_.store(kNoPermit, std::memory_order_relaxed); 2301 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); 2302 std::fill(tlsPtr_.rosalloc_runs, 2303 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, 2304 gc::allocator::RosAlloc::GetDedicatedFullRun()); 2305 tlsPtr_.checkpoint_function = nullptr; 2306 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 2307 tlsPtr_.active_suspend_barriers[i] = nullptr; 2308 } 2309 tlsPtr_.flip_function = nullptr; 2310 tlsPtr_.thread_local_mark_stack = nullptr; 2311 tls32_.is_transitioning_to_runnable = false; 2312 tls32_.use_mterp = false; 2313 } 2314 2315 void Thread::NotifyInTheadList() { 2316 tls32_.use_mterp = interpreter::CanUseMterp(); 2317 } 2318 2319 bool Thread::CanLoadClasses() const { 2320 return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable(); 2321 } 2322 2323 bool Thread::IsStillStarting() const { 2324 // You might think you can check whether the state is kStarting, but for much of thread startup, 2325 // the thread is in kNative; it might also be in kVmWait. 
2326 // You might think you can check whether the peer is null, but the peer is actually created and 2327 // assigned fairly early on, and needs to be. 2328 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 2329 // this thread _ever_ entered kRunnable". 2330 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) || 2331 (*tlsPtr_.name == kThreadNameDuringStartup); 2332 } 2333 2334 void Thread::AssertPendingException() const { 2335 CHECK(IsExceptionPending()) << "Pending exception expected."; 2336 } 2337 2338 void Thread::AssertPendingOOMException() const { 2339 AssertPendingException(); 2340 auto* e = GetException(); 2341 CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass()) 2342 << e->Dump(); 2343 } 2344 2345 void Thread::AssertNoPendingException() const { 2346 if (UNLIKELY(IsExceptionPending())) { 2347 ScopedObjectAccess soa(Thread::Current()); 2348 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); 2349 } 2350 } 2351 2352 void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { 2353 if (UNLIKELY(IsExceptionPending())) { 2354 ScopedObjectAccess soa(Thread::Current()); 2355 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " 2356 << GetException()->Dump(); 2357 } 2358 } 2359 2360 class MonitorExitVisitor : public SingleRootVisitor { 2361 public: 2362 explicit MonitorExitVisitor(Thread* self) : self_(self) { } 2363 2364 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit. 2365 void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED) 2366 override NO_THREAD_SAFETY_ANALYSIS { 2367 if (self_->HoldsLock(entered_monitor)) { 2368 LOG(WARNING) << "Calling MonitorExit on object " 2369 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")" 2370 << " left locked by native thread " 2371 << *Thread::Current() << " which is detaching"; 2372 entered_monitor->MonitorExit(self_); 2373 } 2374 } 2375 2376 private: 2377 Thread* const self_; 2378 }; 2379 2380 void Thread::Destroy() { 2381 Thread* self = this; 2382 DCHECK_EQ(self, Thread::Current()); 2383 2384 if (tlsPtr_.jni_env != nullptr) { 2385 { 2386 ScopedObjectAccess soa(self); 2387 MonitorExitVisitor visitor(self); 2388 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 2389 tlsPtr_.jni_env->monitors_.VisitRoots(&visitor, RootInfo(kRootVMInternal)); 2390 } 2391 // Release locally held global references which releasing may require the mutator lock. 2392 if (tlsPtr_.jpeer != nullptr) { 2393 // If pthread_create fails we don't have a jni env here. 2394 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer); 2395 tlsPtr_.jpeer = nullptr; 2396 } 2397 if (tlsPtr_.class_loader_override != nullptr) { 2398 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override); 2399 tlsPtr_.class_loader_override = nullptr; 2400 } 2401 } 2402 2403 if (tlsPtr_.opeer != nullptr) { 2404 ScopedObjectAccess soa(self); 2405 // We may need to call user-supplied managed code, do this before final clean-up. 
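    // HandleUncaughtExceptions() forwards any pending exception to the peer's
    // dispatchUncaughtException(Throwable), and RemoveFromThreadGroup() calls
    // ThreadGroup.removeThread(Thread); both may execute arbitrary managed
    // code (e.g. a user-installed uncaught exception handler).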
2406 HandleUncaughtExceptions(soa); 2407 RemoveFromThreadGroup(soa); 2408 Runtime* runtime = Runtime::Current(); 2409 if (runtime != nullptr) { 2410 runtime->GetRuntimeCallbacks()->ThreadDeath(self); 2411 } 2412 2413 // this.nativePeer = 0; 2414 if (Runtime::Current()->IsActiveTransaction()) { 2415 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2416 ->SetLong<true>(tlsPtr_.opeer, 0); 2417 } else { 2418 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2419 ->SetLong<false>(tlsPtr_.opeer, 0); 2420 } 2421 2422 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone 2423 // who is waiting. 2424 ObjPtr<mirror::Object> lock = 2425 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer); 2426 // (This conditional is only needed for tests, where Thread.lock won't have been set.) 2427 if (lock != nullptr) { 2428 StackHandleScope<1> hs(self); 2429 Handle<mirror::Object> h_obj(hs.NewHandle(lock)); 2430 ObjectLock<mirror::Object> locker(self, h_obj); 2431 locker.NotifyAll(); 2432 } 2433 tlsPtr_.opeer = nullptr; 2434 } 2435 2436 { 2437 ScopedObjectAccess soa(self); 2438 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this); 2439 if (kUseReadBarrier) { 2440 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this); 2441 } 2442 } 2443 } 2444 2445 Thread::~Thread() { 2446 CHECK(tlsPtr_.class_loader_override == nullptr); 2447 CHECK(tlsPtr_.jpeer == nullptr); 2448 CHECK(tlsPtr_.opeer == nullptr); 2449 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run? 2450 if (initialized) { 2451 delete tlsPtr_.jni_env; 2452 tlsPtr_.jni_env = nullptr; 2453 } 2454 CHECK_NE(GetState(), kRunnable); 2455 CHECK(!ReadFlag(kCheckpointRequest)); 2456 CHECK(!ReadFlag(kEmptyCheckpointRequest)); 2457 CHECK(tlsPtr_.checkpoint_function == nullptr); 2458 CHECK_EQ(checkpoint_overflow_.size(), 0u); 2459 CHECK(tlsPtr_.flip_function == nullptr); 2460 CHECK_EQ(tls32_.is_transitioning_to_runnable, false); 2461 2462 // Make sure we processed all deoptimization requests. 2463 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization"; 2464 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) << 2465 "Not all deoptimized frames have been consumed by the debugger."; 2466 2467 // We may be deleting a still born thread. 2468 SetStateUnsafe(kTerminated); 2469 2470 delete wait_cond_; 2471 delete wait_mutex_; 2472 2473 if (tlsPtr_.long_jump_context != nullptr) { 2474 delete tlsPtr_.long_jump_context; 2475 } 2476 2477 if (initialized) { 2478 CleanupCpu(); 2479 } 2480 2481 if (tlsPtr_.single_step_control != nullptr) { 2482 delete tlsPtr_.single_step_control; 2483 } 2484 delete tlsPtr_.instrumentation_stack; 2485 delete tlsPtr_.name; 2486 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample; 2487 2488 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this); 2489 2490 TearDownAlternateSignalStack(); 2491 } 2492 2493 void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) { 2494 if (!IsExceptionPending()) { 2495 return; 2496 } 2497 ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2498 ScopedThreadStateChange tsc(this, kNative); 2499 2500 // Get and clear the exception. 
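  // Holding the throwable in a JNI local reference keeps it alive across the
  // ExceptionClear() below, so it can still be passed to
  // dispatchUncaughtException().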
2501 ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred()); 2502 tlsPtr_.jni_env->ExceptionClear(); 2503 2504 // Call the Thread instance's dispatchUncaughtException(Throwable) 2505 tlsPtr_.jni_env->CallVoidMethod(peer.get(), 2506 WellKnownClasses::java_lang_Thread_dispatchUncaughtException, 2507 exception.get()); 2508 2509 // If the dispatchUncaughtException threw, clear that exception too. 2510 tlsPtr_.jni_env->ExceptionClear(); 2511 } 2512 2513 void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) { 2514 // this.group.removeThread(this); 2515 // group can be null if we're in the compiler or a test. 2516 ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 2517 ->GetObject(tlsPtr_.opeer); 2518 if (ogroup != nullptr) { 2519 ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); 2520 ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2521 ScopedThreadStateChange tsc(soa.Self(), kNative); 2522 tlsPtr_.jni_env->CallVoidMethod(group.get(), 2523 WellKnownClasses::java_lang_ThreadGroup_removeThread, 2524 peer.get()); 2525 } 2526 } 2527 2528 bool Thread::HandleScopeContains(jobject obj) const { 2529 StackReference<mirror::Object>* hs_entry = 2530 reinterpret_cast<StackReference<mirror::Object>*>(obj); 2531 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) { 2532 if (cur->Contains(hs_entry)) { 2533 return true; 2534 } 2535 } 2536 // JNI code invoked from portable code uses shadow frames rather than the handle scope. 2537 return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry); 2538 } 2539 2540 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id) { 2541 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( 2542 visitor, RootInfo(kRootNativeStack, thread_id)); 2543 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) { 2544 cur->VisitRoots(buffered_visitor); 2545 } 2546 } 2547 2548 ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const { 2549 if (obj == nullptr) { 2550 return nullptr; 2551 } 2552 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2553 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2554 ObjPtr<mirror::Object> result; 2555 bool expect_null = false; 2556 // The "kinds" below are sorted by the frequency we expect to encounter them. 2557 if (kind == kLocal) { 2558 IndirectReferenceTable& locals = tlsPtr_.jni_env->locals_; 2559 // Local references do not need a read barrier. 2560 result = locals.Get<kWithoutReadBarrier>(ref); 2561 } else if (kind == kHandleScopeOrInvalid) { 2562 // TODO: make stack indirect reference table lookup more efficient. 2563 // Check if this is a local reference in the handle scope. 2564 if (LIKELY(HandleScopeContains(obj))) { 2565 // Read from handle scope. 2566 result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr(); 2567 VerifyObject(result); 2568 } else { 2569 tlsPtr_.jni_env->vm_->JniAbortF(nullptr, "use of invalid jobject %p", obj); 2570 expect_null = true; 2571 result = nullptr; 2572 } 2573 } else if (kind == kGlobal) { 2574 result = tlsPtr_.jni_env->vm_->DecodeGlobal(ref); 2575 } else { 2576 DCHECK_EQ(kind, kWeakGlobal); 2577 result = tlsPtr_.jni_env->vm_->DecodeWeakGlobal(const_cast<Thread*>(this), ref); 2578 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) { 2579 // This is a special case where it's okay to return null. 
2580 expect_null = true; 2581 result = nullptr; 2582 } 2583 } 2584 2585 if (UNLIKELY(!expect_null && result == nullptr)) { 2586 tlsPtr_.jni_env->vm_->JniAbortF(nullptr, "use of deleted %s %p", 2587 ToStr<IndirectRefKind>(kind).c_str(), obj); 2588 } 2589 return result; 2590 } 2591 2592 bool Thread::IsJWeakCleared(jweak obj) const { 2593 CHECK(obj != nullptr); 2594 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2595 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2596 CHECK_EQ(kind, kWeakGlobal); 2597 return tlsPtr_.jni_env->vm_->IsWeakGlobalCleared(const_cast<Thread*>(this), ref); 2598 } 2599 2600 // Implements java.lang.Thread.interrupted. 2601 bool Thread::Interrupted() { 2602 DCHECK_EQ(Thread::Current(), this); 2603 // No other thread can concurrently reset the interrupted flag. 2604 bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst); 2605 if (interrupted) { 2606 tls32_.interrupted.store(false, std::memory_order_seq_cst); 2607 } 2608 return interrupted; 2609 } 2610 2611 // Implements java.lang.Thread.isInterrupted. 2612 bool Thread::IsInterrupted() { 2613 return tls32_.interrupted.load(std::memory_order_seq_cst); 2614 } 2615 2616 void Thread::Interrupt(Thread* self) { 2617 { 2618 MutexLock mu(self, *wait_mutex_); 2619 if (tls32_.interrupted.load(std::memory_order_seq_cst)) { 2620 return; 2621 } 2622 tls32_.interrupted.store(true, std::memory_order_seq_cst); 2623 NotifyLocked(self); 2624 } 2625 Unpark(); 2626 } 2627 2628 void Thread::Notify() { 2629 Thread* self = Thread::Current(); 2630 MutexLock mu(self, *wait_mutex_); 2631 NotifyLocked(self); 2632 } 2633 2634 void Thread::NotifyLocked(Thread* self) { 2635 if (wait_monitor_ != nullptr) { 2636 wait_cond_->Signal(self); 2637 } 2638 } 2639 2640 void Thread::SetClassLoaderOverride(jobject class_loader_override) { 2641 if (tlsPtr_.class_loader_override != nullptr) { 2642 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override); 2643 } 2644 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override); 2645 } 2646 2647 using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>; 2648 2649 // Counts the stack trace depth and also fetches the first max_saved_frames frames. 2650 class FetchStackTraceVisitor : public StackVisitor { 2651 public: 2652 explicit FetchStackTraceVisitor(Thread* thread, 2653 ArtMethodDexPcPair* saved_frames = nullptr, 2654 size_t max_saved_frames = 0) 2655 REQUIRES_SHARED(Locks::mutator_lock_) 2656 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2657 saved_frames_(saved_frames), 2658 max_saved_frames_(max_saved_frames) {} 2659 2660 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { 2661 // We want to skip frames up to and including the exception's constructor. 2662 // Note we also skip the frame if it doesn't have a method (namely the callee 2663 // save frame) 2664 ArtMethod* m = GetMethod(); 2665 if (skipping_ && !m->IsRuntimeMethod() && 2666 !GetClassRoot<mirror::Throwable>()->IsAssignableFrom(m->GetDeclaringClass())) { 2667 skipping_ = false; 2668 } 2669 if (!skipping_) { 2670 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). 2671 if (depth_ < max_saved_frames_) { 2672 saved_frames_[depth_].first = m; 2673 saved_frames_[depth_].second = m->IsProxyMethod() ? 
dex::kDexNoIndex : GetDexPc(); 2674 } 2675 ++depth_; 2676 } 2677 } else { 2678 ++skip_depth_; 2679 } 2680 return true; 2681 } 2682 2683 uint32_t GetDepth() const { 2684 return depth_; 2685 } 2686 2687 uint32_t GetSkipDepth() const { 2688 return skip_depth_; 2689 } 2690 2691 private: 2692 uint32_t depth_ = 0; 2693 uint32_t skip_depth_ = 0; 2694 bool skipping_ = true; 2695 ArtMethodDexPcPair* saved_frames_; 2696 const size_t max_saved_frames_; 2697 2698 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); 2699 }; 2700 2701 template<bool kTransactionActive> 2702 class BuildInternalStackTraceVisitor : public StackVisitor { 2703 public: 2704 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) 2705 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2706 self_(self), 2707 skip_depth_(skip_depth), 2708 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} 2709 2710 bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { 2711 // Allocate method trace as an object array where the first element is a pointer array that 2712 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring 2713 // class of the ArtMethod pointers. 2714 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2715 StackHandleScope<1> hs(self_); 2716 ObjPtr<mirror::Class> array_class = 2717 GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker); 2718 // The first element is the methods and dex pc array, the other elements are declaring classes 2719 // for the methods to ensure classes in the stack trace don't get unloaded. 2720 Handle<mirror::ObjectArray<mirror::Object>> trace( 2721 hs.NewHandle( 2722 mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1))); 2723 if (trace == nullptr) { 2724 // Acquire uninterruptible_ in all paths. 2725 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2726 self_->AssertPendingOOMException(); 2727 return false; 2728 } 2729 ObjPtr<mirror::PointerArray> methods_and_pcs = 2730 class_linker->AllocPointerArray(self_, depth * 2); 2731 const char* last_no_suspend_cause = 2732 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2733 if (methods_and_pcs == nullptr) { 2734 self_->AssertPendingOOMException(); 2735 return false; 2736 } 2737 trace->Set(0, methods_and_pcs); 2738 trace_ = trace.Get(); 2739 // If We are called from native, use non-transactional mode. 2740 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; 2741 return true; 2742 } 2743 2744 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { 2745 self_->EndAssertNoThreadSuspension(nullptr); 2746 } 2747 2748 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { 2749 if (trace_ == nullptr) { 2750 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 2751 } 2752 if (skip_depth_ > 0) { 2753 skip_depth_--; 2754 return true; 2755 } 2756 ArtMethod* m = GetMethod(); 2757 if (m->IsRuntimeMethod()) { 2758 return true; // Ignore runtime frames (in particular callee save). 2759 } 2760 AddFrame(m, m->IsProxyMethod() ? 
dex::kDexNoIndex : GetDexPc()); 2761 return true; 2762 } 2763 2764 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { 2765 ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); 2766 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); 2767 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( 2768 trace_methods_and_pcs->GetLength() / 2 + count_, 2769 dex_pc, 2770 pointer_size_); 2771 // Save the declaring class of the method to ensure that the declaring classes of the methods 2772 // do not get unloaded while the stack trace is live. 2773 trace_->Set(count_ + 1, method->GetDeclaringClass()); 2774 ++count_; 2775 } 2776 2777 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { 2778 return ObjPtr<mirror::PointerArray>::DownCast(trace_->Get(0)); 2779 } 2780 2781 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { 2782 return trace_; 2783 } 2784 2785 private: 2786 Thread* const self_; 2787 // How many more frames to skip. 2788 int32_t skip_depth_; 2789 // Current position down stack trace. 2790 uint32_t count_ = 0; 2791 // An object array where the first element is a pointer array that contains the ArtMethod 2792 // pointers on the stack and dex PCs. The rest of the elements are the declaring 2793 // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of 2794 // the i'th frame. 2795 mirror::ObjectArray<mirror::Object>* trace_ = nullptr; 2796 // For cross compilation. 2797 const PointerSize pointer_size_; 2798 2799 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); 2800 }; 2801 2802 template<bool kTransactionActive> 2803 jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { 2804 // Compute depth of stack, save frames if possible to avoid needing to recompute many. 2805 constexpr size_t kMaxSavedFrames = 256; 2806 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); 2807 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this), 2808 &saved_frames[0], 2809 kMaxSavedFrames); 2810 count_visitor.WalkStack(); 2811 const uint32_t depth = count_visitor.GetDepth(); 2812 const uint32_t skip_depth = count_visitor.GetSkipDepth(); 2813 2814 // Build internal stack trace. 2815 BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), 2816 const_cast<Thread*>(this), 2817 skip_depth); 2818 if (!build_trace_visitor.Init(depth)) { 2819 return nullptr; // Allocation failed. 2820 } 2821 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster 2822 // than doing the stack walk twice. 2823 if (depth < kMaxSavedFrames) { 2824 for (size_t i = 0; i < depth; ++i) { 2825 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); 2826 } 2827 } else { 2828 build_trace_visitor.WalkStack(); 2829 } 2830 2831 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); 2832 if (kIsDebugBuild) { 2833 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); 2834 // Second half of trace_methods is dex PCs. 
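    // Layout of the methods-and-pcs PointerArray, with n = GetLength() / 2:
    //   index i     (0 <= i < n): ArtMethod* of the i-th visited frame
    //   index n + i             : dex pc recorded for that frame
    // This mirrors BuildInternalStackTraceVisitor::AddFrame() above.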
2835 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) { 2836 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>( 2837 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); 2838 CHECK(method != nullptr); 2839 } 2840 } 2841 return soa.AddLocalReference<jobject>(trace); 2842 } 2843 template jobject Thread::CreateInternalStackTrace<false>( 2844 const ScopedObjectAccessAlreadyRunnable& soa) const; 2845 template jobject Thread::CreateInternalStackTrace<true>( 2846 const ScopedObjectAccessAlreadyRunnable& soa) const; 2847 2848 bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { 2849 // Only count the depth since we do not pass a stack frame array as an argument. 2850 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this)); 2851 count_visitor.WalkStack(); 2852 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth()); 2853 } 2854 2855 static ObjPtr<mirror::StackTraceElement> CreateStackTraceElement( 2856 const ScopedObjectAccessAlreadyRunnable& soa, 2857 ArtMethod* method, 2858 uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { 2859 int32_t line_number; 2860 StackHandleScope<3> hs(soa.Self()); 2861 auto class_name_object(hs.NewHandle<mirror::String>(nullptr)); 2862 auto source_name_object(hs.NewHandle<mirror::String>(nullptr)); 2863 if (method->IsProxyMethod()) { 2864 line_number = -1; 2865 class_name_object.Assign(method->GetDeclaringClass()->GetName()); 2866 // source_name_object intentionally left null for proxy methods 2867 } else { 2868 line_number = method->GetLineNumFromDexPC(dex_pc); 2869 // Allocate element, potentially triggering GC 2870 // TODO: reuse class_name_object via Class::name_? 2871 const char* descriptor = method->GetDeclaringClassDescriptor(); 2872 CHECK(descriptor != nullptr); 2873 std::string class_name(PrettyDescriptor(descriptor)); 2874 class_name_object.Assign( 2875 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); 2876 if (class_name_object == nullptr) { 2877 soa.Self()->AssertPendingOOMException(); 2878 return nullptr; 2879 } 2880 const char* source_file = method->GetDeclaringClassSourceFile(); 2881 if (line_number == -1) { 2882 // Make the line_number field of StackTraceElement hold the dex pc. 2883 // source_name_object is intentionally left null if we failed to map the dex pc to 2884 // a line number (most probably because there is no debug info). See b/30183883. 
2885 line_number = dex_pc; 2886 } else { 2887 if (source_file != nullptr) { 2888 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); 2889 if (source_name_object == nullptr) { 2890 soa.Self()->AssertPendingOOMException(); 2891 return nullptr; 2892 } 2893 } 2894 } 2895 } 2896 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName(); 2897 CHECK(method_name != nullptr); 2898 Handle<mirror::String> method_name_object( 2899 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); 2900 if (method_name_object == nullptr) { 2901 return nullptr; 2902 } 2903 return mirror::StackTraceElement::Alloc(soa.Self(), 2904 class_name_object, 2905 method_name_object, 2906 source_name_object, 2907 line_number); 2908 } 2909 2910 jobjectArray Thread::InternalStackTraceToStackTraceElementArray( 2911 const ScopedObjectAccessAlreadyRunnable& soa, 2912 jobject internal, 2913 jobjectArray output_array, 2914 int* stack_depth) { 2915 // Decode the internal stack trace into the depth, method trace and PC trace. 2916 // Subtract one for the methods and PC trace. 2917 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1; 2918 DCHECK_GE(depth, 0); 2919 2920 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); 2921 2922 jobjectArray result; 2923 2924 if (output_array != nullptr) { 2925 // Reuse the array we were given. 2926 result = output_array; 2927 // ...adjusting the number of frames we'll write to not exceed the array length. 2928 const int32_t traces_length = 2929 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength(); 2930 depth = std::min(depth, traces_length); 2931 } else { 2932 // Create java_trace array and place in local reference table 2933 ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> java_traces = 2934 class_linker->AllocStackTraceElementArray(soa.Self(), depth); 2935 if (java_traces == nullptr) { 2936 return nullptr; 2937 } 2938 result = soa.AddLocalReference<jobjectArray>(java_traces); 2939 } 2940 2941 if (stack_depth != nullptr) { 2942 *stack_depth = depth; 2943 } 2944 2945 for (int32_t i = 0; i < depth; ++i) { 2946 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces = 2947 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>(); 2948 // Methods and dex PC trace is element 0. 2949 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); 2950 const ObjPtr<mirror::PointerArray> method_trace = 2951 ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)); 2952 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 2953 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize); 2954 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( 2955 i + method_trace->GetLength() / 2, kRuntimePointerSize); 2956 const ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(soa, method, dex_pc); 2957 if (obj == nullptr) { 2958 return nullptr; 2959 } 2960 // We are called from native: use non-transactional mode. 2961 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj); 2962 } 2963 return result; 2964 } 2965 2966 jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { 2967 // This code allocates. Do not allow it to operate with a pending exception. 
2968 if (IsExceptionPending()) { 2969 return nullptr; 2970 } 2971 2972 // If flip_function is not null, it means we have run a checkpoint 2973 // before the thread wakes up to execute the flip function and the 2974 // thread roots haven't been forwarded. So the following access to 2975 // the roots (locks or methods in the frames) would be bad. Run it 2976 // here. TODO: clean up. 2977 // Note: copied from DumpJavaStack. 2978 { 2979 Thread* this_thread = const_cast<Thread*>(this); 2980 Closure* flip_func = this_thread->GetFlipFunction(); 2981 if (flip_func != nullptr) { 2982 flip_func->Run(this_thread); 2983 } 2984 } 2985 2986 class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor { 2987 public: 2988 CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in, 2989 Thread* self, 2990 Context* context) 2991 : MonitorObjectsStackVisitor(self, context), 2992 wait_jobject_(soaa_in.Env(), nullptr), 2993 block_jobject_(soaa_in.Env(), nullptr), 2994 soaa_(soaa_in) {} 2995 2996 protected: 2997 VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED) 2998 override 2999 REQUIRES_SHARED(Locks::mutator_lock_) { 3000 ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement( 3001 soaa_, m, GetDexPc(/* abort on error */ false)); 3002 if (obj == nullptr) { 3003 return VisitMethodResult::kEndStackWalk; 3004 } 3005 stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr())); 3006 return VisitMethodResult::kContinueMethod; 3007 } 3008 3009 VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override { 3010 lock_objects_.push_back({}); 3011 lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_); 3012 3013 DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size()); 3014 3015 return VisitMethodResult::kContinueMethod; 3016 } 3017 3018 void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED) 3019 override 3020 REQUIRES_SHARED(Locks::mutator_lock_) { 3021 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj)); 3022 } 3023 void VisitSleepingObject(ObjPtr<mirror::Object> obj) 3024 override 3025 REQUIRES_SHARED(Locks::mutator_lock_) { 3026 wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj)); 3027 } 3028 void VisitBlockedOnObject(ObjPtr<mirror::Object> obj, 3029 ThreadState state ATTRIBUTE_UNUSED, 3030 uint32_t owner_tid ATTRIBUTE_UNUSED) 3031 override 3032 REQUIRES_SHARED(Locks::mutator_lock_) { 3033 block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj)); 3034 } 3035 void VisitLockedObject(ObjPtr<mirror::Object> obj) 3036 override 3037 REQUIRES_SHARED(Locks::mutator_lock_) { 3038 frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj)); 3039 } 3040 3041 public: 3042 std::vector<ScopedLocalRef<jobject>> stack_trace_elements_; 3043 ScopedLocalRef<jobject> wait_jobject_; 3044 ScopedLocalRef<jobject> block_jobject_; 3045 std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_; 3046 3047 private: 3048 const ScopedObjectAccessAlreadyRunnable& soaa_; 3049 3050 std::vector<ScopedLocalRef<jobject>> frame_lock_objects_; 3051 }; 3052 3053 std::unique_ptr<Context> context(Context::Create()); 3054 CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get()); 3055 dumper.WalkStack(); 3056 3057 // There should not be a pending exception. Otherwise, return with it pending. 3058 if (IsExceptionPending()) { 3059 return nullptr; 3060 } 3061 3062 // Now go and create Java arrays. 
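  // The result is a dalvik.system.AnnotatedStackTraceElement[] with one entry
  // per visited frame: each element's stackTraceElement field is set, its
  // heldLocks Object[] is filled in when the frame held monitors, and, for the
  // top frame only, its blockedOn field records the contended object (if any).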
3063 3064 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 3065 3066 StackHandleScope<6> hs(soa.Self()); 3067 Handle<mirror::Class> h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass( 3068 soa.Self(), 3069 "[Ldalvik/system/AnnotatedStackTraceElement;")); 3070 if (h_aste_array_class == nullptr) { 3071 return nullptr; 3072 } 3073 Handle<mirror::Class> h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType()); 3074 3075 Handle<mirror::Class> h_o_array_class = 3076 hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker)); 3077 DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized. 3078 3079 3080 // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 . 3081 class_linker->EnsureInitialized(soa.Self(), 3082 h_aste_class, 3083 /* can_init_fields= */ true, 3084 /* can_init_parents= */ true); 3085 if (soa.Self()->IsExceptionPending()) { 3086 // This should not fail in a healthy runtime. 3087 return nullptr; 3088 } 3089 3090 ArtField* stack_trace_element_field = h_aste_class->FindField( 3091 soa.Self(), h_aste_class.Get(), "stackTraceElement", "Ljava/lang/StackTraceElement;"); 3092 DCHECK(stack_trace_element_field != nullptr); 3093 ArtField* held_locks_field = h_aste_class->FindField( 3094 soa.Self(), h_aste_class.Get(), "heldLocks", "[Ljava/lang/Object;"); 3095 DCHECK(held_locks_field != nullptr); 3096 ArtField* blocked_on_field = h_aste_class->FindField( 3097 soa.Self(), h_aste_class.Get(), "blockedOn", "Ljava/lang/Object;"); 3098 DCHECK(blocked_on_field != nullptr); 3099 3100 size_t length = dumper.stack_trace_elements_.size(); 3101 ObjPtr<mirror::ObjectArray<mirror::Object>> array = 3102 mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length); 3103 if (array == nullptr) { 3104 soa.Self()->AssertPendingOOMException(); 3105 return nullptr; 3106 } 3107 3108 ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array)); 3109 3110 MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr)); 3111 MutableHandle<mirror::ObjectArray<mirror::Object>> handle2( 3112 hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr)); 3113 for (size_t i = 0; i != length; ++i) { 3114 handle.Assign(h_aste_class->AllocObject(soa.Self())); 3115 if (handle == nullptr) { 3116 soa.Self()->AssertPendingOOMException(); 3117 return nullptr; 3118 } 3119 3120 // Set stack trace element. 3121 stack_trace_element_field->SetObject<false>( 3122 handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get())); 3123 3124 // Create locked-on array. 3125 if (!dumper.lock_objects_[i].empty()) { 3126 handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), 3127 h_o_array_class.Get(), 3128 dumper.lock_objects_[i].size())); 3129 if (handle2 == nullptr) { 3130 soa.Self()->AssertPendingOOMException(); 3131 return nullptr; 3132 } 3133 int32_t j = 0; 3134 for (auto& scoped_local : dumper.lock_objects_[i]) { 3135 if (scoped_local == nullptr) { 3136 continue; 3137 } 3138 handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get())); 3139 DCHECK(!soa.Self()->IsExceptionPending()); 3140 j++; 3141 } 3142 held_locks_field->SetObject<false>(handle.Get(), handle2.Get()); 3143 } 3144 3145 // Set blocked-on object. 
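// Only the top frame (i == 0) can be blocked on a monitor, so the field is filled for that
// frame only.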
3146 if (i == 0) { 3147 if (dumper.block_jobject_ != nullptr) { 3148 blocked_on_field->SetObject<false>( 3149 handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get())); 3150 } 3151 } 3152 3153 ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get())); 3154 soa.Env()->SetObjectArrayElement(result.get(), i, elem.get()); 3155 DCHECK(!soa.Self()->IsExceptionPending()); 3156 } 3157 3158 return result.release(); 3159 } 3160 3161 void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) { 3162 va_list args; 3163 va_start(args, fmt); 3164 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 3165 va_end(args); 3166 } 3167 3168 void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, 3169 const char* fmt, va_list ap) { 3170 std::string msg; 3171 StringAppendV(&msg, fmt, ap); 3172 ThrowNewException(exception_class_descriptor, msg.c_str()); 3173 } 3174 3175 void Thread::ThrowNewException(const char* exception_class_descriptor, 3176 const char* msg) { 3177 // Callers should either clear or call ThrowNewWrappedException. 3178 AssertNoPendingExceptionForNewException(msg); 3179 ThrowNewWrappedException(exception_class_descriptor, msg); 3180 } 3181 3182 static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self) 3183 REQUIRES_SHARED(Locks::mutator_lock_) { 3184 ArtMethod* method = self->GetCurrentMethod(nullptr); 3185 return method != nullptr 3186 ? method->GetDeclaringClass()->GetClassLoader() 3187 : nullptr; 3188 } 3189 3190 void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, 3191 const char* msg) { 3192 DCHECK_EQ(this, Thread::Current()); 3193 ScopedObjectAccessUnchecked soa(this); 3194 StackHandleScope<3> hs(soa.Self()); 3195 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self()))); 3196 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException())); 3197 ClearException(); 3198 Runtime* runtime = Runtime::Current(); 3199 auto* cl = runtime->GetClassLinker(); 3200 Handle<mirror::Class> exception_class( 3201 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); 3202 if (UNLIKELY(exception_class == nullptr)) { 3203 CHECK(IsExceptionPending()); 3204 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); 3205 return; 3206 } 3207 3208 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true, 3209 true))) { 3210 DCHECK(IsExceptionPending()); 3211 return; 3212 } 3213 DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); 3214 Handle<mirror::Throwable> exception( 3215 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this)))); 3216 3217 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception. 3218 if (exception == nullptr) { 3219 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. 3220 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingException()); 3221 return; 3222 } 3223 3224 // Choose an appropriate constructor and set up the arguments. 3225 const char* signature; 3226 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr); 3227 if (msg != nullptr) { 3228 // Ensure we remember this and the method over the String allocation. 
3229 msg_string.reset( 3230 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg))); 3231 if (UNLIKELY(msg_string.get() == nullptr)) { 3232 CHECK(IsExceptionPending()); // OOME. 3233 return; 3234 } 3235 if (cause.get() == nullptr) { 3236 signature = "(Ljava/lang/String;)V"; 3237 } else { 3238 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; 3239 } 3240 } else { 3241 if (cause.get() == nullptr) { 3242 signature = "()V"; 3243 } else { 3244 signature = "(Ljava/lang/Throwable;)V"; 3245 } 3246 } 3247 ArtMethod* exception_init_method = 3248 exception_class->FindConstructor(signature, cl->GetImagePointerSize()); 3249 3250 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in " 3251 << PrettyDescriptor(exception_class_descriptor); 3252 3253 if (UNLIKELY(!runtime->IsStarted())) { 3254 // Something is trying to throw an exception without a started runtime, which is the common 3255 // case in the compiler. We won't be able to invoke the constructor of the exception, so set 3256 // the exception fields directly. 3257 if (msg != nullptr) { 3258 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString()); 3259 } 3260 if (cause.get() != nullptr) { 3261 exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); 3262 } 3263 ScopedLocalRef<jobject> trace(GetJniEnv(), 3264 Runtime::Current()->IsActiveTransaction() 3265 ? CreateInternalStackTrace<true>(soa) 3266 : CreateInternalStackTrace<false>(soa)); 3267 if (trace.get() != nullptr) { 3268 exception->SetStackState(DecodeJObject(trace.get()).Ptr()); 3269 } 3270 SetException(exception.Get()); 3271 } else { 3272 jvalue jv_args[2]; 3273 size_t i = 0; 3274 3275 if (msg != nullptr) { 3276 jv_args[i].l = msg_string.get(); 3277 ++i; 3278 } 3279 if (cause.get() != nullptr) { 3280 jv_args[i].l = cause.get(); 3281 ++i; 3282 } 3283 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get())); 3284 InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args); 3285 if (LIKELY(!IsExceptionPending())) { 3286 SetException(exception.Get()); 3287 } 3288 } 3289 } 3290 3291 void Thread::ThrowOutOfMemoryError(const char* msg) { 3292 LOG(WARNING) << "Throwing OutOfMemoryError " 3293 << '"' << msg << '"' 3294 << " (VmSize " << GetProcessStatus("VmSize") 3295 << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")"); 3296 if (!tls32_.throwing_OutOfMemoryError) { 3297 tls32_.throwing_OutOfMemoryError = true; 3298 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); 3299 tls32_.throwing_OutOfMemoryError = false; 3300 } else { 3301 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. 3302 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()); 3303 } 3304 } 3305 3306 Thread* Thread::CurrentFromGdb() { 3307 return Thread::Current(); 3308 } 3309 3310 void Thread::DumpFromGdb() const { 3311 std::ostringstream ss; 3312 Dump(ss); 3313 std::string str(ss.str()); 3314 // log to stderr for debugging command line processes 3315 std::cerr << str; 3316 #ifdef ART_TARGET_ANDROID 3317 // log to logcat for debugging frameworks processes 3318 LOG(INFO) << str; 3319 #endif 3320 } 3321 3322 // Explicitly instantiate 32 and 64bit thread offset dumping support. 
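// (The template definition follows; explicitly instantiating both pointer sizes makes the
// symbols available to other translation units, e.g. for a call such as
// Thread::DumpThreadOffset<PointerSize::k64>(os, offset).)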
3323 template 3324 void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset); 3325 template 3326 void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset); 3327 3328 template<PointerSize ptr_size> 3329 void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { 3330 #define DO_THREAD_OFFSET(x, y) \ 3331 if (offset == (x).Uint32Value()) { \ 3332 os << (y); \ 3333 return; \ 3334 } 3335 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags") 3336 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table") 3337 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception") 3338 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer"); 3339 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env") 3340 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self") 3341 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end") 3342 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id") 3343 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking") 3344 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method") 3345 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame") 3346 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope") 3347 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger") 3348 #undef DO_THREAD_OFFSET 3349 3350 #define JNI_ENTRY_POINT_INFO(x) \ 3351 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 3352 os << #x; \ 3353 return; \ 3354 } 3355 JNI_ENTRY_POINT_INFO(pDlsymLookup) 3356 #undef JNI_ENTRY_POINT_INFO 3357 3358 #define QUICK_ENTRY_POINT_INFO(x) \ 3359 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 3360 os << #x; \ 3361 return; \ 3362 } 3363 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) 3364 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8) 3365 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16) 3366 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32) 3367 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64) 3368 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) 3369 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) 3370 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) 3371 QUICK_ENTRY_POINT_INFO(pAllocStringObject) 3372 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) 3373 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) 3374 QUICK_ENTRY_POINT_INFO(pAllocStringFromString) 3375 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) 3376 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) 3377 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) 3378 QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess) 3379 QUICK_ENTRY_POINT_INFO(pResolveType) 3380 QUICK_ENTRY_POINT_INFO(pResolveString) 3381 QUICK_ENTRY_POINT_INFO(pSet8Instance) 3382 QUICK_ENTRY_POINT_INFO(pSet8Static) 3383 QUICK_ENTRY_POINT_INFO(pSet16Instance) 3384 QUICK_ENTRY_POINT_INFO(pSet16Static) 3385 QUICK_ENTRY_POINT_INFO(pSet32Instance) 3386 QUICK_ENTRY_POINT_INFO(pSet32Static) 3387 QUICK_ENTRY_POINT_INFO(pSet64Instance) 3388 QUICK_ENTRY_POINT_INFO(pSet64Static) 3389 QUICK_ENTRY_POINT_INFO(pSetObjInstance) 3390 QUICK_ENTRY_POINT_INFO(pSetObjStatic) 3391 QUICK_ENTRY_POINT_INFO(pGetByteInstance) 3392 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) 3393 QUICK_ENTRY_POINT_INFO(pGetByteStatic) 3394 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) 3395 QUICK_ENTRY_POINT_INFO(pGetShortInstance) 3396 QUICK_ENTRY_POINT_INFO(pGetCharInstance) 3397 QUICK_ENTRY_POINT_INFO(pGetShortStatic) 3398 QUICK_ENTRY_POINT_INFO(pGetCharStatic) 3399 QUICK_ENTRY_POINT_INFO(pGet32Instance) 3400 QUICK_ENTRY_POINT_INFO(pGet32Static) 3401 
QUICK_ENTRY_POINT_INFO(pGet64Instance) 3402 QUICK_ENTRY_POINT_INFO(pGet64Static) 3403 QUICK_ENTRY_POINT_INFO(pGetObjInstance) 3404 QUICK_ENTRY_POINT_INFO(pGetObjStatic) 3405 QUICK_ENTRY_POINT_INFO(pAputObject) 3406 QUICK_ENTRY_POINT_INFO(pJniMethodStart) 3407 QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized) 3408 QUICK_ENTRY_POINT_INFO(pJniMethodEnd) 3409 QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized) 3410 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference) 3411 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized) 3412 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline) 3413 QUICK_ENTRY_POINT_INFO(pLockObject) 3414 QUICK_ENTRY_POINT_INFO(pUnlockObject) 3415 QUICK_ENTRY_POINT_INFO(pCmpgDouble) 3416 QUICK_ENTRY_POINT_INFO(pCmpgFloat) 3417 QUICK_ENTRY_POINT_INFO(pCmplDouble) 3418 QUICK_ENTRY_POINT_INFO(pCmplFloat) 3419 QUICK_ENTRY_POINT_INFO(pCos) 3420 QUICK_ENTRY_POINT_INFO(pSin) 3421 QUICK_ENTRY_POINT_INFO(pAcos) 3422 QUICK_ENTRY_POINT_INFO(pAsin) 3423 QUICK_ENTRY_POINT_INFO(pAtan) 3424 QUICK_ENTRY_POINT_INFO(pAtan2) 3425 QUICK_ENTRY_POINT_INFO(pCbrt) 3426 QUICK_ENTRY_POINT_INFO(pCosh) 3427 QUICK_ENTRY_POINT_INFO(pExp) 3428 QUICK_ENTRY_POINT_INFO(pExpm1) 3429 QUICK_ENTRY_POINT_INFO(pHypot) 3430 QUICK_ENTRY_POINT_INFO(pLog) 3431 QUICK_ENTRY_POINT_INFO(pLog10) 3432 QUICK_ENTRY_POINT_INFO(pNextAfter) 3433 QUICK_ENTRY_POINT_INFO(pSinh) 3434 QUICK_ENTRY_POINT_INFO(pTan) 3435 QUICK_ENTRY_POINT_INFO(pTanh) 3436 QUICK_ENTRY_POINT_INFO(pFmod) 3437 QUICK_ENTRY_POINT_INFO(pL2d) 3438 QUICK_ENTRY_POINT_INFO(pFmodf) 3439 QUICK_ENTRY_POINT_INFO(pL2f) 3440 QUICK_ENTRY_POINT_INFO(pD2iz) 3441 QUICK_ENTRY_POINT_INFO(pF2iz) 3442 QUICK_ENTRY_POINT_INFO(pIdivmod) 3443 QUICK_ENTRY_POINT_INFO(pD2l) 3444 QUICK_ENTRY_POINT_INFO(pF2l) 3445 QUICK_ENTRY_POINT_INFO(pLdiv) 3446 QUICK_ENTRY_POINT_INFO(pLmod) 3447 QUICK_ENTRY_POINT_INFO(pLmul) 3448 QUICK_ENTRY_POINT_INFO(pShlLong) 3449 QUICK_ENTRY_POINT_INFO(pShrLong) 3450 QUICK_ENTRY_POINT_INFO(pUshrLong) 3451 QUICK_ENTRY_POINT_INFO(pIndexOf) 3452 QUICK_ENTRY_POINT_INFO(pStringCompareTo) 3453 QUICK_ENTRY_POINT_INFO(pMemcpy) 3454 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline) 3455 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline) 3456 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge) 3457 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck) 3458 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck) 3459 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck) 3460 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck) 3461 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck) 3462 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic) 3463 QUICK_ENTRY_POINT_INFO(pTestSuspend) 3464 QUICK_ENTRY_POINT_INFO(pDeliverException) 3465 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) 3466 QUICK_ENTRY_POINT_INFO(pThrowDivZero) 3467 QUICK_ENTRY_POINT_INFO(pThrowNullPointer) 3468 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) 3469 QUICK_ENTRY_POINT_INFO(pDeoptimize) 3470 QUICK_ENTRY_POINT_INFO(pA64Load) 3471 QUICK_ENTRY_POINT_INFO(pA64Store) 3472 QUICK_ENTRY_POINT_INFO(pNewEmptyString) 3473 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B) 3474 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI) 3475 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII) 3476 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII) 3477 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString) 3478 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString) 3479 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset) 3480 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset) 3481 
QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C) 3482 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII) 3483 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC) 3484 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints) 3485 QUICK_ENTRY_POINT_INFO(pNewStringFromString) 3486 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) 3487 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) 3488 QUICK_ENTRY_POINT_INFO(pReadBarrierJni) 3489 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00) 3490 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01) 3491 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02) 3492 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03) 3493 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04) 3494 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05) 3495 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06) 3496 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07) 3497 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08) 3498 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09) 3499 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10) 3500 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11) 3501 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12) 3502 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13) 3503 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14) 3504 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15) 3505 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16) 3506 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17) 3507 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18) 3508 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19) 3509 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20) 3510 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21) 3511 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22) 3512 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23) 3513 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24) 3514 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25) 3515 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26) 3516 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27) 3517 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28) 3518 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29) 3519 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) 3520 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow) 3521 3522 QUICK_ENTRY_POINT_INFO(pJniMethodFastStart) 3523 QUICK_ENTRY_POINT_INFO(pJniMethodFastEnd) 3524 #undef QUICK_ENTRY_POINT_INFO 3525 3526 os << offset; 3527 } 3528 3529 void Thread::QuickDeliverException() { 3530 // Get exception from thread. 3531 ObjPtr<mirror::Throwable> exception = GetException(); 3532 CHECK(exception != nullptr); 3533 if (exception == GetDeoptimizationException()) { 3534 artDeoptimize(this); 3535 UNREACHABLE(); 3536 } 3537 3538 ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr()); 3539 3540 // This is a real exception: let the instrumentation know about it. 3541 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 3542 if (instrumentation->HasExceptionThrownListeners() && 3543 IsExceptionThrownByCurrentMethod(exception)) { 3544 // Instrumentation may cause GC so keep the exception object safe. 3545 StackHandleScope<1> hs(this); 3546 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception)); 3547 instrumentation->ExceptionThrownEvent(this, exception.Ptr()); 3548 } 3549 // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something? 3550 // Note: we do this *after* reporting the exception to instrumentation in case it now requires 3551 // deoptimization. It may happen if a debugger is attached and requests new events (single-step, 3552 // breakpoint, ...) when the exception is reported. 
3553 // 3554 // Note we need to check for both force_frame_pop and force_retry_instruction. The first is 3555 // expected to happen fairly regularly but the second can only happen if we are using 3556 // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later 3557 // and see every frame being popped. We don't need to handle it any differently. 3558 ShadowFrame* cf; 3559 bool force_deopt; 3560 { 3561 NthCallerVisitor visitor(this, 0, false); 3562 visitor.WalkStack(); 3563 cf = visitor.GetCurrentShadowFrame(); 3564 if (cf == nullptr) { 3565 cf = FindDebuggerShadowFrame(visitor.GetFrameId()); 3566 } 3567 bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame(); 3568 bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction(); 3569 if (kIsDebugBuild && force_frame_pop) { 3570 NthCallerVisitor penultimate_visitor(this, 1, false); 3571 penultimate_visitor.WalkStack(); 3572 ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame(); 3573 if (penultimate_frame == nullptr) { 3574 penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId()); 3575 } 3576 DCHECK(penultimate_frame != nullptr && 3577 penultimate_frame->GetForceRetryInstruction()) 3578 << "Force pop frame without retry instruction found. penultimate frame is null: " 3579 << (penultimate_frame == nullptr ? "true" : "false"); 3580 } 3581 force_deopt = force_frame_pop || force_retry_instr; 3582 } 3583 if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) { 3584 NthCallerVisitor visitor(this, 0, false); 3585 visitor.WalkStack(); 3586 if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) { 3587 // method_type shouldn't matter due to exception handling. 3588 const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault; 3589 // Save the exception into the deoptimization context so it can be restored 3590 // before entering the interpreter. 3591 if (force_deopt) { 3592 VLOG(deopt) << "Deopting " << cf->GetMethod()->PrettyMethod() << " for frame-pop"; 3593 DCHECK(Runtime::Current()->AreNonStandardExitsEnabled()); 3594 // Get rid of the exception since we are doing a framepop instead. 3595 LOG(WARNING) << "Suppressing pending exception for retry-instruction/frame-pop: " 3596 << exception->Dump(); 3597 ClearException(); 3598 } 3599 PushDeoptimizationContext( 3600 JValue(), 3601 /* is_reference= */ false, 3602 (force_deopt ? nullptr : exception), 3603 /* from_code= */ false, 3604 method_type); 3605 artDeoptimize(this); 3606 UNREACHABLE(); 3607 } else { 3608 LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method " 3609 << visitor.caller->PrettyMethod(); 3610 } 3611 } 3612 3613 // Don't leave exception visible while we try to find the handler, which may cause class 3614 // resolution. 3615 ClearException(); 3616 QuickExceptionHandler exception_handler(this, false); 3617 exception_handler.FindCatch(exception); 3618 if (exception_handler.GetClearException()) { 3619 // Exception was cleared as part of delivery. 3620 DCHECK(!IsExceptionPending()); 3621 } else { 3622 // Exception was put back with a throw location. 3623 DCHECK(IsExceptionPending()); 3624 // Check the to-space invariant on the re-installed exception (if applicable). 
3625 ReadBarrier::MaybeAssertToSpaceInvariant(GetException()); 3626 } 3627 exception_handler.DoLongJump(); 3628 } 3629 3630 Context* Thread::GetLongJumpContext() { 3631 Context* result = tlsPtr_.long_jump_context; 3632 if (result == nullptr) { 3633 result = Context::Create(); 3634 } else { 3635 tlsPtr_.long_jump_context = nullptr; // Avoid context being shared. 3636 result->Reset(); 3637 } 3638 return result; 3639 } 3640 3641 ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out, 3642 bool check_suspended, 3643 bool abort_on_error) const { 3644 // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is 3645 // so we don't abort in a special situation (thinlocked monitor) when dumping the Java 3646 // stack. 3647 ArtMethod* method = nullptr; 3648 uint32_t dex_pc = dex::kDexNoIndex; 3649 StackVisitor::WalkStack( 3650 [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) { 3651 ArtMethod* m = visitor->GetMethod(); 3652 if (m->IsRuntimeMethod()) { 3653 // Continue if this is a runtime method. 3654 return true; 3655 } 3656 method = m; 3657 dex_pc = visitor->GetDexPc(abort_on_error); 3658 return false; 3659 }, 3660 const_cast<Thread*>(this), 3661 /* context= */ nullptr, 3662 StackVisitor::StackWalkKind::kIncludeInlinedFrames, 3663 check_suspended); 3664 3665 if (dex_pc_out != nullptr) { 3666 *dex_pc_out = dex_pc; 3667 } 3668 return method; 3669 } 3670 3671 bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const { 3672 return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId(); 3673 } 3674 3675 extern std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMethod** sp) 3676 REQUIRES_SHARED(Locks::mutator_lock_); 3677 3678 // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor). 3679 template <typename RootVisitor, bool kPrecise = false> 3680 class ReferenceMapVisitor : public StackVisitor { 3681 public: 3682 ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor) 3683 REQUIRES_SHARED(Locks::mutator_lock_) 3684 // We are visiting the references in compiled frames, so we do not need 3685 // to know the inlined frames. 3686 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames), 3687 visitor_(visitor) {} 3688 3689 bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) { 3690 if (false) { 3691 LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod()) 3692 << StringPrintf("@ PC:%04x", GetDexPc()); 3693 } 3694 ShadowFrame* shadow_frame = GetCurrentShadowFrame(); 3695 if (shadow_frame != nullptr) { 3696 VisitShadowFrame(shadow_frame); 3697 } else { 3698 VisitQuickFrame(); 3699 } 3700 return true; 3701 } 3702 3703 void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) { 3704 ArtMethod* m = shadow_frame->GetMethod(); 3705 VisitDeclaringClass(m); 3706 DCHECK(m != nullptr); 3707 size_t num_regs = shadow_frame->NumberOfVRegs(); 3708 DCHECK(m->IsNative() || shadow_frame->HasReferenceArray()); 3709 // handle scope for JNI or References for interpreter. 3710 for (size_t reg = 0; reg < num_regs; ++reg) { 3711 mirror::Object* ref = shadow_frame->GetVRegReference(reg); 3712 if (ref != nullptr) { 3713 mirror::Object* new_ref = ref; 3714 visitor_(&new_ref, reg, this); 3715 if (new_ref != ref) { 3716 shadow_frame->SetVRegReference(reg, new_ref); 3717 } 3718 } 3719 } 3720 // Mark lock count map required for structured locking checks. 
3721 shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this); 3722 } 3723 3724 private: 3725 // Visiting the declaring class is necessary so that we don't unload the class of a method that 3726 // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since 3727 // the threads do not all hold the heap bitmap lock for parallel GC. 3728 void VisitDeclaringClass(ArtMethod* method) 3729 REQUIRES_SHARED(Locks::mutator_lock_) 3730 NO_THREAD_SAFETY_ANALYSIS { 3731 ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>(); 3732 // klass can be null for runtime methods. 3733 if (klass != nullptr) { 3734 if (kVerifyImageObjectsMarked) { 3735 gc::Heap* const heap = Runtime::Current()->GetHeap(); 3736 gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass, 3737 /*fail_ok=*/true); 3738 if (space != nullptr && space->IsImageSpace()) { 3739 bool failed = false; 3740 if (!space->GetLiveBitmap()->Test(klass.Ptr())) { 3741 failed = true; 3742 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space; 3743 } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) { 3744 failed = true; 3745 LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space; 3746 } 3747 if (failed) { 3748 GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); 3749 space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT)); 3750 LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method 3751 << " klass@" << klass.Ptr(); 3752 // Pretty info last in case it crashes. 3753 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " 3754 << klass->PrettyClass(); 3755 } 3756 } 3757 } 3758 mirror::Object* new_ref = klass.Ptr(); 3759 visitor_(&new_ref, /* vreg= */ -1, this); 3760 if (new_ref != klass) { 3761 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); 3762 } 3763 } 3764 } 3765 3766 template <typename T> 3767 ALWAYS_INLINE 3768 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { 3769 ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); 3770 DCHECK(cur_quick_frame != nullptr); 3771 ArtMethod* m = *cur_quick_frame; 3772 VisitDeclaringClass(m); 3773 3774 // Process register map (which native and runtime methods don't have) 3775 if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { 3776 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); 3777 DCHECK(method_header->IsOptimized()); 3778 StackReference<mirror::Object>* vreg_base = 3779 reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame); 3780 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); 3781 CodeInfo code_info(method_header, kPrecise 3782 ? CodeInfo::DecodeFlags::AllTables // We will need dex register maps. 3783 : CodeInfo::DecodeFlags::GcMasksOnly); 3784 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset); 3785 DCHECK(map.IsValid()); 3786 3787 T vreg_info(m, code_info, map, visitor_); 3788 3789 // Visit stack entries that hold pointers. 
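// The stack mask records which frame slots hold live references at this native PC; only those
// slots are inspected, and a slot is written back only if the visitor moved the reference.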
3790 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map); 3791 for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) { 3792 if (stack_mask.LoadBit(i)) { 3793 StackReference<mirror::Object>* ref_addr = vreg_base + i; 3794 mirror::Object* ref = ref_addr->AsMirrorPtr(); 3795 if (ref != nullptr) { 3796 mirror::Object* new_ref = ref; 3797 vreg_info.VisitStack(&new_ref, i, this); 3798 if (ref != new_ref) { 3799 ref_addr->Assign(new_ref); 3800 } 3801 } 3802 } 3803 } 3804 // Visit callee-save registers that hold pointers. 3805 uint32_t register_mask = code_info.GetRegisterMaskOf(map); 3806 for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) { 3807 if (register_mask & (1 << i)) { 3808 mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i)); 3809 if (kIsDebugBuild && ref_addr == nullptr) { 3810 std::string thread_name; 3811 GetThread()->GetThreadName(thread_name); 3812 LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name; 3813 DescribeStack(GetThread()); 3814 LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) " 3815 << "set in register_mask=" << register_mask << " at " << DescribeLocation(); 3816 } 3817 if (*ref_addr != nullptr) { 3818 vreg_info.VisitRegister(ref_addr, i, this); 3819 } 3820 } 3821 } 3822 } else if (!m->IsRuntimeMethod() && m->IsProxyMethod()) { 3823 // If this is a proxy method, visit its reference arguments. 3824 DCHECK(!m->IsStatic()); 3825 DCHECK(!m->IsNative()); 3826 std::vector<StackReference<mirror::Object>*> ref_addrs = 3827 GetProxyReferenceArguments(cur_quick_frame); 3828 for (StackReference<mirror::Object>* ref_addr : ref_addrs) { 3829 mirror::Object* ref = ref_addr->AsMirrorPtr(); 3830 if (ref != nullptr) { 3831 mirror::Object* new_ref = ref; 3832 visitor_(&new_ref, /* vreg= */ -1, this); 3833 if (ref != new_ref) { 3834 ref_addr->Assign(new_ref); 3835 } 3836 } 3837 } 3838 } 3839 } 3840 3841 void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 3842 if (kPrecise) { 3843 VisitQuickFramePrecise(); 3844 } else { 3845 VisitQuickFrameNonPrecise(); 3846 } 3847 } 3848 3849 void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) { 3850 struct UndefinedVRegInfo { 3851 UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED, 3852 const CodeInfo& code_info ATTRIBUTE_UNUSED, 3853 const StackMap& map ATTRIBUTE_UNUSED, 3854 RootVisitor& _visitor) 3855 : visitor(_visitor) { 3856 } 3857 3858 ALWAYS_INLINE 3859 void VisitStack(mirror::Object** ref, 3860 size_t stack_index ATTRIBUTE_UNUSED, 3861 const StackVisitor* stack_visitor) 3862 REQUIRES_SHARED(Locks::mutator_lock_) { 3863 visitor(ref, -1, stack_visitor); 3864 } 3865 3866 ALWAYS_INLINE 3867 void VisitRegister(mirror::Object** ref, 3868 size_t register_index ATTRIBUTE_UNUSED, 3869 const StackVisitor* stack_visitor) 3870 REQUIRES_SHARED(Locks::mutator_lock_) { 3871 visitor(ref, -1, stack_visitor); 3872 } 3873 3874 RootVisitor& visitor; 3875 }; 3876 VisitQuickFrameWithVregCallback<UndefinedVRegInfo>(); 3877 } 3878 3879 void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) { 3880 struct StackMapVRegInfo { 3881 StackMapVRegInfo(ArtMethod* method, 3882 const CodeInfo& _code_info, 3883 const StackMap& map, 3884 RootVisitor& _visitor) 3885 : number_of_dex_registers(method->DexInstructionData().RegistersSize()), 3886 code_info(_code_info), 3887 dex_register_map(code_info.GetDexRegisterMapOf(map)), 3888 visitor(_visitor) { 3889 } 3890 3891 // TODO: If necessary, we should consider caching a reverse map instead of the linear 3892 // lookups for 
each location. 3893 void FindWithType(const size_t index, 3894 const DexRegisterLocation::Kind kind, 3895 mirror::Object** ref, 3896 const StackVisitor* stack_visitor) 3897 REQUIRES_SHARED(Locks::mutator_lock_) { 3898 bool found = false; 3899 for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) { 3900 DexRegisterLocation location = dex_register_map[dex_reg]; 3901 if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) { 3902 visitor(ref, dex_reg, stack_visitor); 3903 found = true; 3904 } 3905 } 3906 3907 if (!found) { 3908 // If nothing found, report with -1. 3909 visitor(ref, -1, stack_visitor); 3910 } 3911 } 3912 3913 void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor) 3914 REQUIRES_SHARED(Locks::mutator_lock_) { 3915 const size_t stack_offset = stack_index * kFrameSlotSize; 3916 FindWithType(stack_offset, 3917 DexRegisterLocation::Kind::kInStack, 3918 ref, 3919 stack_visitor); 3920 } 3921 3922 void VisitRegister(mirror::Object** ref, 3923 size_t register_index, 3924 const StackVisitor* stack_visitor) 3925 REQUIRES_SHARED(Locks::mutator_lock_) { 3926 FindWithType(register_index, 3927 DexRegisterLocation::Kind::kInRegister, 3928 ref, 3929 stack_visitor); 3930 } 3931 3932 size_t number_of_dex_registers; 3933 const CodeInfo& code_info; 3934 DexRegisterMap dex_register_map; 3935 RootVisitor& visitor; 3936 }; 3937 VisitQuickFrameWithVregCallback<StackMapVRegInfo>(); 3938 } 3939 3940 // Visitor for when we visit a root. 3941 RootVisitor& visitor_; 3942 }; 3943 3944 class RootCallbackVisitor { 3945 public: 3946 RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {} 3947 3948 void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const 3949 REQUIRES_SHARED(Locks::mutator_lock_) { 3950 visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg)); 3951 } 3952 3953 private: 3954 RootVisitor* const visitor_; 3955 const uint32_t tid_; 3956 }; 3957 3958 template <bool kPrecise> 3959 void Thread::VisitRoots(RootVisitor* visitor) { 3960 const pid_t thread_id = GetThreadId(); 3961 visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id)); 3962 if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) { 3963 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), 3964 RootInfo(kRootNativeStack, thread_id)); 3965 } 3966 if (tlsPtr_.async_exception != nullptr) { 3967 visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.async_exception), 3968 RootInfo(kRootNativeStack, thread_id)); 3969 } 3970 visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id)); 3971 tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id)); 3972 tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id)); 3973 HandleScopeVisitRoots(visitor, thread_id); 3974 if (tlsPtr_.debug_invoke_req != nullptr) { 3975 tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id)); 3976 } 3977 // Visit roots for deoptimization. 
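// Shadow frames stashed for deoptimization, return values and pending exceptions saved in
// deoptimization context records, and the frame-id-to-shadow-frame map are not covered by the
// regular stack walk further down, so they are visited explicitly here.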
3978 if (tlsPtr_.stacked_shadow_frame_record != nullptr) { 3979 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 3980 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback); 3981 for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record; 3982 record != nullptr; 3983 record = record->GetLink()) { 3984 for (ShadowFrame* shadow_frame = record->GetShadowFrame(); 3985 shadow_frame != nullptr; 3986 shadow_frame = shadow_frame->GetLink()) { 3987 mapper.VisitShadowFrame(shadow_frame); 3988 } 3989 } 3990 } 3991 for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; 3992 record != nullptr; 3993 record = record->GetLink()) { 3994 if (record->IsReference()) { 3995 visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(), 3996 RootInfo(kRootThreadObject, thread_id)); 3997 } 3998 visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(), 3999 RootInfo(kRootThreadObject, thread_id)); 4000 } 4001 if (tlsPtr_.frame_id_to_shadow_frame != nullptr) { 4002 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 4003 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback); 4004 for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame; 4005 record != nullptr; 4006 record = record->GetNext()) { 4007 mapper.VisitShadowFrame(record->GetShadowFrame()); 4008 } 4009 } 4010 for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) { 4011 verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id)); 4012 } 4013 // Visit roots on this thread's stack. 4014 RuntimeContextType context; 4015 RootCallbackVisitor visitor_to_callback(visitor, thread_id); 4016 ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback); 4017 mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false); 4018 for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) { 4019 visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id)); 4020 } 4021 } 4022 4023 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { 4024 if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) { 4025 VisitRoots</* kPrecise= */ true>(visitor); 4026 } else { 4027 VisitRoots</* kPrecise= */ false>(visitor); 4028 } 4029 } 4030 4031 class VerifyRootVisitor : public SingleRootVisitor { 4032 public: 4033 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) 4034 override REQUIRES_SHARED(Locks::mutator_lock_) { 4035 VerifyObject(root); 4036 } 4037 }; 4038 4039 void Thread::VerifyStackImpl() { 4040 if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) { 4041 VerifyRootVisitor visitor; 4042 std::unique_ptr<Context> context(Context::Create()); 4043 RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId()); 4044 ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback); 4045 mapper.WalkStack(); 4046 } 4047 } 4048 4049 // Set the stack end to the value to be used during a stack overflow. 4050 void Thread::SetStackEndForStackOverflow() { 4051 // During stack overflow we allow use of the full stack. 4052 if (tlsPtr_.stack_end == tlsPtr_.stack_begin) { 4053 // However, we seem to have already extended to use the full stack.
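// Overflowing again while already running on the full stack means the reserved space has been
// exhausted, so abort rather than keep recursing.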
4054 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently " 4055 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?"; 4056 DumpStack(LOG_STREAM(ERROR)); 4057 LOG(FATAL) << "Recursive stack overflow."; 4058 } 4059 4060 tlsPtr_.stack_end = tlsPtr_.stack_begin; 4061 4062 // Remove the stack overflow protection if is it set up. 4063 bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks(); 4064 if (implicit_stack_check) { 4065 if (!UnprotectStack()) { 4066 LOG(ERROR) << "Unable to remove stack protection for stack overflow"; 4067 } 4068 } 4069 } 4070 4071 void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) { 4072 DCHECK_LE(start, end); 4073 DCHECK_LE(end, limit); 4074 tlsPtr_.thread_local_start = start; 4075 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start; 4076 tlsPtr_.thread_local_end = end; 4077 tlsPtr_.thread_local_limit = limit; 4078 tlsPtr_.thread_local_objects = 0; 4079 } 4080 4081 bool Thread::HasTlab() const { 4082 bool has_tlab = tlsPtr_.thread_local_pos != nullptr; 4083 if (has_tlab) { 4084 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr); 4085 } else { 4086 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr); 4087 } 4088 return has_tlab; 4089 } 4090 4091 std::ostream& operator<<(std::ostream& os, const Thread& thread) { 4092 thread.ShortDump(os); 4093 return os; 4094 } 4095 4096 bool Thread::ProtectStack(bool fatal_on_error) { 4097 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 4098 VLOG(threads) << "Protecting stack at " << pregion; 4099 if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { 4100 if (fatal_on_error) { 4101 LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. 
" 4102 "Reason: " 4103 << strerror(errno) << " size: " << kStackOverflowProtectedSize; 4104 } 4105 return false; 4106 } 4107 return true; 4108 } 4109 4110 bool Thread::UnprotectStack() { 4111 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 4112 VLOG(threads) << "Unprotecting stack at " << pregion; 4113 return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0; 4114 } 4115 4116 void Thread::ActivateSingleStepControl(SingleStepControl* ssc) { 4117 CHECK(Dbg::IsDebuggerActive()); 4118 CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this; 4119 CHECK(ssc != nullptr); 4120 tlsPtr_.single_step_control = ssc; 4121 } 4122 4123 void Thread::DeactivateSingleStepControl() { 4124 CHECK(Dbg::IsDebuggerActive()); 4125 CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this; 4126 SingleStepControl* ssc = GetSingleStepControl(); 4127 tlsPtr_.single_step_control = nullptr; 4128 delete ssc; 4129 } 4130 4131 void Thread::SetDebugInvokeReq(DebugInvokeReq* req) { 4132 CHECK(Dbg::IsDebuggerActive()); 4133 CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this; 4134 CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself"; 4135 CHECK(req != nullptr); 4136 tlsPtr_.debug_invoke_req = req; 4137 } 4138 4139 void Thread::ClearDebugInvokeReq() { 4140 CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this; 4141 CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself"; 4142 DebugInvokeReq* req = tlsPtr_.debug_invoke_req; 4143 tlsPtr_.debug_invoke_req = nullptr; 4144 delete req; 4145 } 4146 4147 void Thread::PushVerifier(verifier::MethodVerifier* verifier) { 4148 verifier->link_ = tlsPtr_.method_verifier; 4149 tlsPtr_.method_verifier = verifier; 4150 } 4151 4152 void Thread::PopVerifier(verifier::MethodVerifier* verifier) { 4153 CHECK_EQ(tlsPtr_.method_verifier, verifier); 4154 tlsPtr_.method_verifier = verifier->link_; 4155 } 4156 4157 size_t Thread::NumberOfHeldMutexes() const { 4158 size_t count = 0; 4159 for (BaseMutex* mu : tlsPtr_.held_mutexes) { 4160 count += mu != nullptr ? 1 : 0; 4161 } 4162 return count; 4163 } 4164 4165 void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { 4166 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); 4167 ClearException(); 4168 ShadowFrame* shadow_frame = 4169 PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame); 4170 ObjPtr<mirror::Throwable> pending_exception; 4171 bool from_code = false; 4172 DeoptimizationMethodType method_type; 4173 PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type); 4174 SetTopOfStack(nullptr); 4175 SetTopOfShadowStack(shadow_frame); 4176 4177 // Restore the exception that was pending before deoptimization then interpret the 4178 // deoptimized frames. 4179 if (pending_exception != nullptr) { 4180 SetException(pending_exception); 4181 } 4182 interpreter::EnterInterpreterFromDeoptimize(this, 4183 shadow_frame, 4184 result, 4185 from_code, 4186 method_type); 4187 } 4188 4189 void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) { 4190 CHECK(new_exception != nullptr); 4191 Runtime::Current()->SetAsyncExceptionsThrown(); 4192 if (kIsDebugBuild) { 4193 // Make sure we are in a checkpoint. 
4194 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 4195 CHECK(this == Thread::Current() || GetSuspendCount() >= 1) 4196 << "It doesn't look like this was called in a checkpoint! this: " 4197 << this << " count: " << GetSuspendCount(); 4198 } 4199 tlsPtr_.async_exception = new_exception.Ptr(); 4200 } 4201 4202 bool Thread::ObserveAsyncException() { 4203 DCHECK(this == Thread::Current()); 4204 if (tlsPtr_.async_exception != nullptr) { 4205 if (tlsPtr_.exception != nullptr) { 4206 LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: " 4207 << tlsPtr_.exception->Dump(); 4208 LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump(); 4209 } 4210 tlsPtr_.exception = tlsPtr_.async_exception; 4211 tlsPtr_.async_exception = nullptr; 4212 return true; 4213 } else { 4214 return IsExceptionPending(); 4215 } 4216 } 4217 4218 void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) { 4219 CHECK(new_exception != nullptr); 4220 // TODO: DCHECK(!IsExceptionPending()); 4221 tlsPtr_.exception = new_exception.Ptr(); 4222 } 4223 4224 bool Thread::IsAotCompiler() { 4225 return Runtime::Current()->IsAotCompiler(); 4226 } 4227 4228 mirror::Object* Thread::GetPeerFromOtherThread() const { 4229 DCHECK(tlsPtr_.jpeer == nullptr); 4230 mirror::Object* peer = tlsPtr_.opeer; 4231 if (kUseReadBarrier && Current()->GetIsGcMarking()) { 4232 // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack 4233 // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly 4234 // mark/forward it here. 4235 peer = art::ReadBarrier::Mark(peer); 4236 } 4237 return peer; 4238 } 4239 4240 void Thread::SetReadBarrierEntrypoints() { 4241 // Make sure entrypoints aren't null. 4242 UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true); 4243 } 4244 4245 void Thread::ClearAllInterpreterCaches() { 4246 static struct ClearInterpreterCacheClosure : Closure { 4247 void Run(Thread* thread) override { 4248 thread->GetInterpreterCache()->Clear(thread); 4249 } 4250 } closure; 4251 Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); 4252 } 4253 4254 4255 void Thread::ReleaseLongJumpContextInternal() { 4256 // Each QuickExceptionHandler gets a long jump context and uses 4257 // it for doing the long jump, after finding catch blocks/doing deoptimization. 4258 // Both finding catch blocks and deoptimization can trigger another 4259 // exception such as a result of class loading. So there can be nested 4260 // cases of exception handling and multiple contexts being used. 4261 // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context 4262 // for reuse so there is no need to always allocate a new one each time when 4263 // getting a context. Since we only keep one context for reuse, delete the 4264 // existing one since the passed in context is yet to be used for longjump. 4265 delete tlsPtr_.long_jump_context; 4266 } 4267 4268 void Thread::SetNativePriority(int new_priority) { 4269 // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread. 4270 PaletteStatus status = PaletteSchedSetPriority(0, new_priority); 4271 CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno); 4272 } 4273 4274 int Thread::GetNativePriority() { 4275 int priority = 0; 4276 // ART tests on JVM can reach this code path, use tid = 0 as shorthand for current thread. 
4277 PaletteStatus status = PaletteSchedGetPriority(0, &priority); 4278 CHECK(status == PaletteStatus::kOkay || status == PaletteStatus::kCheckErrno); 4279 return priority; 4280 } 4281 4282 bool Thread::IsSystemDaemon() const { 4283 if (GetPeer() == nullptr) { 4284 return false; 4285 } 4286 return jni::DecodeArtField( 4287 WellKnownClasses::java_lang_Thread_systemDaemon)->GetBoolean(GetPeer()); 4288 } 4289 4290 } // namespace art 4291