/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
static constexpr uint64_t kThreadSuspendTimeoutMs = 30 * 1000;  // 30s.
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStack = false;

ThreadList::ThreadList()
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
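  // Only daemon threads remain at this point (non-daemons exited above); suspend them so they
  // cannot touch runtime state while the rest of shutdown proceeds.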
  SuspendAllDaemonThreadsForShutdown();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack && kDumpUnattachedThreadNativeStack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << "\n";
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
// overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    local_os << "\n";
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
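      // The dump itself was buffered into local_os above, so this critical section covers only
      // the final write and contention on the logging lock stays short.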
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? FATAL : ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  DumpCheckpoint checkpoint(&os, dump_native_stack);
  size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
  if (threads_running_checkpoint != 0) {
    checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
          << "\nUnsuspended thread: <<" << *thread << "\n"
          << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}

size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads which are suspended get their
    // checkpoint manually called.
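    // Two-phase scheme: runnable threads get a checkpoint request flag and run the closure
    // themselves, while already-suspended threads have their suspend count raised so that they
    // stay suspended until this thread runs the closure on their behalf below.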
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // The thread is probably suspended; try to make sure that it stays suspended.
            if (thread->GetState() == kRunnable) {
              // The thread switched back to runnable - spurious failure, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, nullptr, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
                     << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }

  {
    // Imitate ResumeAll; threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now that the suspend_count_ is lowered, we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs.
// Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  const uint64_t start_time = NanoTime();
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  SuspendAllInternal(self, self, nullptr);

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - start_time);

  // Resume runnable threads.
  std::vector<Thread*> runnable_threads;
  std::vector<Thread*> other_threads;
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      // Set the flip function for both runnable and suspended threads
      // because Thread::DumpState/DumpJavaStack() (invoked by a
      // checkpoint) may cause the flip function to be run for a
      // runnable/suspended thread before a runnable thread runs it
      // for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread->IsSuspendedAtSuspendCheck()) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, nullptr, false);
        runnable_threads.push_back(thread);
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  // Run the closure on the other threads and let them resume.
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    thread_flip_visitor->Run(self);
  }

  // Resume other threads.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_threads.size() + other_threads.size() + 1;  // +1 for self.
}

void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
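    // Where timed rwlocks are available, retry the exclusive lock with a timeout so that a
    // wedged thread produces a fatal diagnostic dump rather than a silent hang.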
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend without the mutator lock is slightly racy; in some rare cases this
        // could result in a thread suspend timeout.
        // Timeout if we wait more than kThreadSuspendTimeoutMs milliseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}

// Ensures all threads running Java suspend and that those not running Java don't start.
// The debugger thread might be set to kRunnable for a short period of time after
// SuspendAllInternal. This is safe because it will be set back to a suspended state before
// SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    bool debug_suspend) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set - (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.

  // The atomic counter for number of threads that need to pass the barrier.
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend-all state for attaching threads.
    ++suspend_all_count_;
    if (debug_suspend) {
      ++debug_suspend_all_count_;
    }
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
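    // Each ModifySuspendCount call below also registers &pending_threads as a suspend barrier on
    // the target thread: the target decrements the counter when it actually suspends, and the
    // wait loop at the end of this function blocks until the counter drains to zero.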
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      while (true) {
        if (LIKELY(thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend))) {
          break;
        } else {
          // Failure means the list of active_suspend_barriers is full; we should release the
          // thread_suspend_count_lock_ (to avoid deadlock) and wait till the target thread has
          // executed Thread::PassActiveSuspendBarriers(). Note that we could not simply wait for
          // the thread to change to a suspended state, because it might need to run a checkpoint
          // function before the state change, which also needs thread_suspend_count_lock_.

          // This is very unlikely to happen since more than kMaxSuspendBarriers threads need to
          // execute SuspendAllInternal() simultaneously, and the target thread stays in kRunnable
          // in the meantime.
          Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
          NanoSleep(100000);
          Locks::thread_suspend_count_lock_->ExclusiveLock(self);
        }
      }

      // Must install the pending_threads counter first, then check thread->IsSuspended() and
      // clear the counter. Otherwise there's a race with
      // Thread::TransitionFromRunnableToSuspended() that can lead a thread to miss a call to
      // PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(true, CLOCK_MONOTONIC, 10000, 0, &wait_timeout);
#endif
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected time out during suspend all.";
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      } else {
        cur_val = pending_threads.LoadRelaxed();
        CHECK_EQ(cur_val, 0);
        break;
      }
#else
      // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}

void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend-all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
                << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
                 << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

Thread* ThreadList::SuspendThreadByPeer(jobject peer,
                                        bool request_suspension,
                                        bool debug_suspension,
                                        bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend
      // rather than request thread suspension, to avoid potential cycles in threads requesting
      // each other suspend.
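      // Each iteration re-resolves the peer since the target may exit between polls; the backoff
      // at the bottom of the loop starts with sched_yield and escalates to bounded sleeps (see
      // ThreadSuspendSleep above).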
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
        }
        ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
                      << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread
        // it will self-suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self-suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is WAI since we want to avoid using usleep at
    // all if possible. This shouldn't be an issue since time to suspend should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
                                           const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
                                            bool debug_suspension,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend
      // rather than request thread suspension, to avoid potential cycles in threads requesting
      // each other suspend.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
            << " no longer in thread list";
        // There's a race in inflating a lock and the owner giving up ownership and then dying.
        ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread
        // it will self-suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self-suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id);
          if (suspended_thread != nullptr) {
            thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thread_id) {
      return thread;
    }
  }
  return nullptr;
}

void ThreadList::SuspendAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " SuspendAllForDebugger starting...";

  SuspendAllInternal(self, self, debug_thread, true);
  // Block on the mutator lock until all Runnable threads release their share of access then
  // immediately unlock again.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
    UnsafeLogFatalForThreadSuspendAllTimeout();
  } else {
    Locks::mutator_lock_->ExclusiveUnlock(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
#endif
  // Disabled for the following race condition:
  // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
  // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
  // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
  // state (from SetStateUnsafe).
  // AssertThreadsAreSuspended(self, self, debug_thread);

  VLOG(threads) << *self << " SuspendAllForDebugger complete";
}

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourselves.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting.
    // We want to ensure that we're the only one fiddling with the suspend count though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, nullptr, true);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // object has been deleted, clear it for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend-all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
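      // The loop below skips this thread and the JDWP debugger thread, and leaves untouched any
      // thread whose debug suspension was already undone individually.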
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        thread->ModifySuspendCount(self, -1, nullptr, true);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend-all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shut down.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}

void ThreadList::SuspendAllDaemonThreadsForShutdown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::thread_list_lock_);
  size_t daemons_left = 0;
  {  // Tell all the daemons it's time to suspend.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, nullptr, false);
        ++daemons_left;
      }
      // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
      // the sleep forever one.
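      // A daemon that re-enters JNI after this point will sleep forever in the stub functions
      // rather than touch runtime structures that are about to be freed.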
      thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
    }
  }
  // If we have any daemons left, give them 200ms to get past any point where they are about to
  // access runtime state, so that they settle into a blocked rather than runnable state.
  // Examples: Monitor code or waking up from a condition variable. TODO: Try and see if there is
  // a better way to wait for daemon threads to be in a blocked state.
  if (daemons_left > 0) {
    static constexpr size_t kDaemonSleepTime = 200 * 1000;
    usleep(kDaemonSleepTime);
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
  static constexpr size_t kSleepMicroseconds = 1000;
  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
    bool all_suspended = true;
    for (const auto& thread : list_) {
      if (thread != self && thread->GetState() == kRunnable) {
        if (!have_complained) {
          LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
          have_complained = true;
        }
        all_suspended = false;
      }
    }
    if (all_suspended) {
      return;
    }
    usleep(kSleepMicroseconds);
  }
  LOG(WARNING) << "timed out suspending all daemon threads";
}

void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
  // this isn't particularly efficient, the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, true);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, false);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
  if (kUseReadBarrier) {
    // Initialize according to the state of the CC collector.
    bool is_gc_marking =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
    self->SetIsGcMarking(is_gc_marking);
    bool weak_ref_access_enabled =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
    self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
  }
}

void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join.
  // It is important to do this after incrementing unregistering_count_,
  // since we want the runtime to wait for the daemon threads to exit before deleting the thread
  // list.
  self->Destroy();

  // If tracing, remember thread id and name before thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request, loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
#ifdef __ANDROID__
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
#endif

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}

void ThreadList::VisitRoots(RootVisitor* visitor) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  return 0;
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}

ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
}

ScopedSuspendAll::~ScopedSuspendAll() {
  Runtime::Current()->GetThreadList()->ResumeAll();
}

}  // namespace art