/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "android-base/stringprintf.h"
#include "nativehelper/ScopedLocalRef.h"
#include "nativehelper/ScopedUtfChars.h"

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc_root.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = false;

ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false),
      shut_down_(false),
      thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
      empty_checkpoint_barrier_(new Barrier(0)) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  CHECK(shut_down_);
}

void ThreadList::ShutDown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
  SuspendAllDaemonThreadsForShutdown();

  shut_down_ = true;
}

bool ThreadList::Contains(Thread* thread) {
  return std::find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << std::endl;
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
// overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by ThreadList::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ?
                           BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    CHECK(self != nullptr);
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str() << std::endl;
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  if (self != nullptr) {
    DumpCheckpoint checkpoint(&os, dump_native_stack);
    size_t threads_running_checkpoint;
    {
      // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
      ScopedObjectAccess soa(self);
      threads_running_checkpoint = RunCheckpoint(&checkpoint);
    }
    if (threads_running_checkpoint != 0) {
      checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
    }
  } else {
    DumpUnattachedThreads(os, dump_native_stack);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
          << "\nUnsuspended thread: <<" << *thread << "\n"
          << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
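// A sketch of the polling pattern the Suspend* routines below layer on top of this helper
// (illustrative only; the real loops also enforce the thread_suspend_timeout_ns_ deadline):
//
//   useconds_t sleep_us = kThreadSuspendInitialSleepUs;  // 0, i.e. start by yielding.
//   while (!thread->IsSuspended()) {
//     ThreadSuspendSleep(sleep_us);
//     sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);  // Exponential back-off.
//   }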
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}

size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads that are suspended get their checkpoint
    // called manually.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // The thread is probably suspended; try to make sure that it stays suspended.
            if (thread->GetState() == kRunnable) {
              // The thread switched back to runnable: spurious failure, try again.
              continue;
            }
            bool updated = thread->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
            DCHECK(updated);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
    // Run the callback to be called inside this critical section.
    if (callback != nullptr) {
      callback->Run(self);
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
                     << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
      DCHECK(updated);
    }
  }

  {
    // Imitate ResumeAll: threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now that the suspend_count_ is lowered we must do the broadcast.
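    // (A woken thread re-checks its own suspend count under thread_suspend_count_lock_ and stays
    // parked until the count drops to zero; see the wait loop in SuspendSelfForDebugger below. So
    // broadcasting to threads that are still meant to stay suspended is harmless.)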
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}

void ThreadList::RunEmptyCheckpoint() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  std::vector<uint32_t> runnable_thread_ids;
  size_t count = 0;
  Barrier* barrier = empty_checkpoint_barrier_.get();
  barrier->Init(self, 0);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestEmptyCheckpoint()) {
            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
            // some time in the near future.
            ++count;
            if (kIsDebugBuild) {
              runnable_thread_ids.push_back(thread->GetThreadId());
            }
            break;
          }
          if (thread->GetState() != kRunnable) {
            // It has been seen in a suspended state, so we are done: it cannot be in the middle
            // of a mutator heap access.
            break;
          }
        }
      }
    }
  }

  // Wake up the threads blocking for weak ref access so that they will respond to the empty
  // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    uint64_t total_wait_time = 0;
    bool first_iter = true;
    while (true) {
      // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
      // on a weak ref access, holds (indirectly blocking for weak ref access through another
      // thread and a mutex.) This needs to be done periodically because the thread may be
      // preempted between the CheckEmptyCheckpointFromMutex call and the subsequent futex wait in
      // Mutex::ExclusiveLock, etc. when the wakeup via WakeupToRespondToEmptyCheckpoint arrives.
      // This could cause a *very rare* deadlock, if not repeated. Most of the cases are handled
      // in the first iteration.
      for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
        mutex->WakeupToRespondToEmptyCheckpoint();
      }
      static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100;      // 100ms
      static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000;  // 10 minutes.
      size_t barrier_count = first_iter ? count : 0;
      first_iter = false;  // Don't add to the barrier count from the second iteration on.
      bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
      if (!timed_out) {
        break;  // Success.
      }
      // This is a very rare case.
      total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
      if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
        std::ostringstream ss;
        ss << "Empty checkpoint timeout\n";
        ss << "Barrier count " << barrier->GetCount(self) << "\n";
        ss << "Runnable thread IDs";
        for (uint32_t tid : runnable_thread_ids) {
          ss << " " << tid;
        }
        ss << "\n";
        Locks::mutator_lock_->Dump(ss);
        ss << "\n";
        LOG(FATAL_WITHOUT_ABORT) << ss.str();
        // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
        // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
        {
          ScopedObjectAccess soa(self);
          MutexLock mu1(self, *Locks::thread_list_lock_);
          for (Thread* thread : GetList()) {
            uint32_t tid = thread->GetThreadId();
            bool is_in_runnable_thread_ids =
                std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
                runnable_thread_ids.end();
            if (is_in_runnable_thread_ids &&
                thread->ReadFlag(kEmptyCheckpointRequest)) {
              // Found a runnable thread that hasn't responded to the empty checkpoint request.
              // Assume it's stuck and safe to dump its stack.
              thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
                           /*dump_native_stack*/ true,
                           /*backtrace_map*/ nullptr,
                           /*force_dump_stack*/ true);
            }
          }
        }
        LOG(FATAL_WITHOUT_ABORT)
            << "Dumped runnable threads that haven't responded to empty checkpoint.";
        // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
        Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
        LOG(FATAL) << "Dumped all threads.";
      }
    }
  }
}

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector,
                                   gc::GcPauseListener* pause_listener) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.

  // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
  // pause.
  const uint64_t suspend_start_time = NanoTime();
  SuspendAllInternal(self, self, nullptr);
  if (pause_listener != nullptr) {
    pause_listener->StartPause();
  }

  // Run the flip callback for the collector.
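  // The callback runs while this thread holds the mutator lock exclusively, i.e. while every
  // other thread is suspended or blocked; this is the stop-the-world window that the
  // RegisterPause() call below accounts for. The per-thread flip functions installed afterwards
  // run outside that window, concurrently with resumed threads.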
  Locks::mutator_lock_->ExclusiveLock(self);
  suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - suspend_start_time);
  if (pause_listener != nullptr) {
    pause_listener->EndPause();
  }

  // Resume runnable threads.
  size_t runnable_thread_count = 0;
  std::vector<Thread*> other_threads;
  {
    TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked
      // by a checkpoint) may cause the flip function to be run for a runnable/suspended thread
      // before a runnable thread runs it for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread == self) {
        continue;
      }
      // Resume early the threads that were runnable but are suspended just for this thread flip
      // or about to transition from non-runnable (e.g. kNative at the SOA entry in a JNI
      // function) to runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable),
      // or waiting for the thread flip to end at the JNI critical section entry
      // (kWaitingForGcThreadFlip).
      ThreadState state = thread->GetState();
      if ((state == kWaitingForGcThreadFlip || thread->IsTransitioningToRunnable()) &&
          thread->GetSuspendCount() == 1) {
        // The thread will resume right after the broadcast.
        bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
        DCHECK(updated);
        ++runnable_thread_count;
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  collector->GetHeap()->ThreadFlipEnd(self);

  // Run the closure on the other threads and let them resume.
  {
    TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    Closure* flip_func = self->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(self);
    }
  }

  // Resume other threads.
  {
    TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
      DCHECK(updated);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_thread_count + other_threads.size() + 1;  // +1 for self.
}

void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
                                                         NsToMs(thread_suspend_timeout_ns_),
                                                         0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend_ without the mutator lock is slightly racy; in some rare cases
        // this could result in a thread suspend timeout.
        // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}

// Ensures all threads running Java suspend and that those not running Java don't start.
// The debugger thread might be set to kRunnable for a short period of time after
// SuspendAllInternal. This is safe because it will be set back to a suspended state before
// SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    SuspendReason reason) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set - (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.

  // The atomic counter for the number of threads that need to pass the barrier.
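  // A simplified sketch of the barrier protocol implemented below (each suspending thread
  // decrements the counter as it parks itself, via Thread::PassActiveSuspendBarriers):
  //
  //   pending_threads = number of runnable threads we asked to suspend;
  //   for each such thread: install &pending_threads as its suspend barrier;
  //   while (pending_threads > 0) futex_wait(&pending_threads);  // With a timeout.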
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    if (reason == SuspendReason::kForDebugger) {
      ++debug_suspend_all_count_;
    }
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      bool updated = thread->ModifySuspendCount(self, +1, &pending_threads, reason);
      DCHECK(updated);

      // Must install the pending_threads counter first, then check thread->IsSuspended() and
      // clear the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended()
      // that can lead a thread to miss a call to PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(thread_suspend_timeout_ns_), 0, &wait_timeout);
#endif
  const uint64_t start_time = NanoTime();
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
                << "Timed out waiting for threads to suspend, waited for "
                << PrettyDuration(NanoTime() - start_time);
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
      // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
      UNUSED(start_time);
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}

void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
      DCHECK(updated);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..." << reason;

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    if (UNLIKELY(!thread->IsSuspended())) {
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
                 << ") thread not suspended";
      return false;
    }
    if (!Contains(thread)) {
      // We only expect threads within the thread list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
                 << ") thread not within thread list";
      return false;
    }
    if (UNLIKELY(!thread->ModifySuspendCount(self, -1, nullptr, reason))) {
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
                 << ") could not modify suspend count.";
      return false;
    }
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
  return true;
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

Thread* ThreadList::SuspendThreadByPeer(jobject peer,
                                        bool request_suspension,
                                        SuspendReason reason,
                                        bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_.
      // It's important that this thread suspend rather than request thread suspension, to avoid
      // potential cycles in threads requesting each other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          bool updated = suspended_thread->ModifySuspendCount(soa.Self(),
                                                              -1,
                                                              nullptr,
                                                              reason);
          DCHECK(updated);
        }
        ThreadSuspendByPeerWarning(self,
                                   ::android::base::WARNING,
                                   "No such thread for suspend",
                                   peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
                      << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          bool updated = suspended_thread->ModifySuspendCount(self, +1, nullptr, reason);
          DCHECK(updated);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread it
        // will self-suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self-suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= thread_suspend_timeout_ns_) {
          ThreadSuspendByPeerWarning(self,
                                     ::android::base::FATAL,
                                     "Thread suspension timed out",
                                     peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            bool updated = suspended_thread->ModifySuspendCount(soa.Self(),
                                                                -1,
                                                                nullptr,
                                                                reason);
            DCHECK(updated);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
                   total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time; switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is working as intended since we want to
    // avoid using usleep at all if possible. This shouldn't be an issue since the time to
    // suspend should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
                                           const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
                                            SuspendReason reason,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspend
      // rather than request thread suspension, to avoid potential cycles in threads requesting
      // each other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
                                           << " no longer in thread list";
        // There's a race in inflating a lock and the owner giving up ownership and then dying.
        ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                       "No such thread id for suspend",
                                       thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          bool updated = thread->ModifySuspendCount(self, +1, nullptr, reason);
          DCHECK(updated);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread it
        // will self-suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self-suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= thread_suspend_timeout_ns_) {
          ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                         "Thread suspension timed out",
                                         thread_id);
          if (suspended_thread != nullptr) {
            bool updated = thread->ModifySuspendCount(soa.Self(), -1, nullptr, reason);
            DCHECK(updated);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
                   total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time; switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thread_id) {
      return thread;
    }
  }
  return nullptr;
}

void ThreadList::SuspendAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " SuspendAllForDebugger starting...";

  SuspendAllInternal(self, self, debug_thread, SuspendReason::kForDebugger);
  // Block on the mutator lock until all Runnable threads release their share of access then
  // immediately unlock again.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
    UnsafeLogFatalForThreadSuspendAllTimeout();
  } else {
    Locks::mutator_lock_->ExclusiveUnlock(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
#endif
  // Disabled for the following race condition:
  // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
  // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
  // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
  // state (from SetStateUnsafe).
  // AssertThreadsAreSuspended(self, self, debug_thread);

  VLOG(threads) << *self << " SuspendAllForDebugger complete";
}

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourself.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kForDebugger);
    DCHECK(updated);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // object has been deleted, clear it for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kForDebugger);
        DCHECK(updated);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      bool suspended = thread->ModifySuspendCount(self,
                                                  -thread->GetDebugSuspendCount(),
                                                  nullptr,
                                                  SuspendReason::kForDebugger);
      DCHECK(suspended);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shut down.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}

void ThreadList::SuspendAllDaemonThreadsForShutdown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  size_t daemons_left = 0;
  {
    // Tell all the daemons it's time to suspend.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        bool updated = thread->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
        DCHECK(updated);
        ++daemons_left;
      }
      // We are shutting down the runtime, so set the JNI functions of all the JNIEnvs to be
      // the sleep-forever ones.
      thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
    }
  }
  // If we have any daemons left, wait 200ms to ensure they are not stuck in a place where they
  // are about to access runtime state and are not in a runnable state. Examples: Monitor code
  // or waking up from a condition variable. TODO: Try and see if there is a better way to wait
  // for daemon threads to be in a blocked state.
  if (daemons_left > 0) {
    static constexpr size_t kDaemonSleepTime = 200 * 1000;
    usleep(kDaemonSleepTime);
  }
  // Give the threads a chance to suspend, complaining if they're slow.
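  // With the constants below this polls at most kTimeoutMicroseconds / kSleepMicroseconds
  // (2000) times: roughly one thread-list scan per millisecond for up to two seconds.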
  bool have_complained = false;
  static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
  static constexpr size_t kSleepMicroseconds = 1000;
  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
    bool all_suspended = true;
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      for (const auto& thread : list_) {
        if (thread != self && thread->GetState() == kRunnable) {
          if (!have_complained) {
            LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
            have_complained = true;
          }
          all_suspended = false;
        }
      }
    }
    if (all_suspended) {
      return;
    }
    usleep(kSleepMicroseconds);
  }
  LOG(WARNING) << "timed out suspending all daemon threads";
}

void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK(!shut_down_);

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify the suspend count in increments of 1 to maintain invariants in ModifySuspendCount.
  // While this isn't particularly efficient, the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kForDebugger);
    DCHECK(updated);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
    DCHECK(updated);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
  if (kUseReadBarrier) {
    gc::collector::ConcurrentCopying* const cc =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
    // Initialize according to the state of the CC collector.
    self->SetIsGcMarkingAndUpdateEntrypoints(cc->IsMarking());
    if (cc->IsUsingReadBarrierEntrypoints()) {
      self->SetReadBarrierEntrypoints();
    }
    self->SetWeakRefAccessEnabled(cc->IsWeakRefAccessEnabled());
  }
}

void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join. It is important to do this after incrementing unregistering_count_
  // since we want the runtime to wait for the daemon threads to exit before deleting the thread
  // list.
  self->Destroy();

  // If tracing, remember the thread id and name before the thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request, loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
#endif

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}

void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
  Thread* const self = Thread::Current();
  std::vector<Thread*> threads_to_visit;

  // Tell threads to suspend and copy them into the list.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      bool suspended = thread->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
      DCHECK(suspended);
      if (thread == self || thread->IsSuspended()) {
        threads_to_visit.push_back(thread);
      } else {
        bool resumed = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
        DCHECK(resumed);
      }
    }
  }

  // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
  // order violations.
  for (Thread* thread : threads_to_visit) {
    thread->VisitRoots(visitor, kVisitRootFlagAllRoots);
  }

  // Restore suspend counts.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : threads_to_visit) {
      bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
      DCHECK(updated);
    }
  }
}

void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor, flags);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  return 0;
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}

ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
}

ScopedSuspendAll::~ScopedSuspendAll() {
  Runtime::Current()->GetThreadList()->ResumeAll();
}

}  // namespace art
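// A minimal usage sketch for the RAII wrapper above (illustrative only; InspectPerThreadState
// is a hypothetical caller, not part of ART). Bracketing a critical region with ScopedSuspendAll
// pairs SuspendAll()/ResumeAll() automatically, so the resume also happens on early returns:
//
//   void InspectPerThreadState() {
//     ScopedSuspendAll ssa("InspectPerThreadState");  // Constructor calls SuspendAll(cause).
//     // ... examine or update state that no mutator thread may touch concurrently ...
//   }                                                 // Destructor calls ResumeAll().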