Lines matching defs:self in ThreadList (the leading number on each line is its line number in the source file)
80 Thread* self = Thread::Current();
82 MutexLock mu(self, *Locks::thread_list_lock_);
83 contains = Contains(self);
155 DIR* d = opendir("/proc/self/task");
160 Thread* self = Thread::Current();
168 MutexLock mu(self, *Locks::thread_list_lock_);
193 // Note thread and self may not be equal if thread was already suspended at the point of the
195 Thread* self = Thread::Current();
198 ScopedObjectAccess soa(self);
204 MutexLock mu(self, *Locks::logging_lock_);
207 barrier_.Pass(self);
211 Thread* self = Thread::Current();
212 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
213 bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
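
Lines 204-213 come from the dump checkpoint: each thread that finishes the dump closure calls barrier_.Pass(self), while the requesting thread blocks in barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout) until the expected number of threads have passed or the wait times out. Below is a minimal, self-contained sketch of that handshake built on std::condition_variable; SimpleBarrier, Pass, and WaitFor are hypothetical stand-ins, not ART's Barrier API.

    // Sketch of the Pass()/Increment() handshake used by the dump checkpoint.
    // SimpleBarrier, Pass, and WaitFor are hypothetical names.
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    class SimpleBarrier {
     public:
      // Called by each thread that has finished its checkpoint work.
      void Pass() {
        std::lock_guard<std::mutex> lg(mu_);
        ++count_;
        cv_.notify_all();
      }
      // Called by the requester: wait until `expected` threads have passed,
      // or give up after `timeout`. Returns true on timeout.
      bool WaitFor(int expected, std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lk(mu_);
        return !cv_.wait_for(lk, timeout, [&] { return count_ >= expected; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    int main() {
      SimpleBarrier barrier;
      constexpr int kThreads = 4;
      std::vector<std::thread> workers;
      for (int i = 0; i < kThreads; ++i) {
        workers.emplace_back([&barrier, i] {
          std::printf("thread %d: dumping state\n", i);  // stand-in for the dump closure
          barrier.Pass();                                // cf. barrier_.Pass(self)
        });
      }
      // cf. barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout)
      bool timed_out = barrier.WaitFor(kThreads, std::chrono::milliseconds(10000));
      std::printf("timed_out=%d\n", timed_out);
      for (auto& t : workers) t.join();
      return 0;
    }
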
244 void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
245 MutexLock mu(self, *Locks::thread_list_lock_);
246 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
251 << "self: <<" << *Thread::Current();
282 Thread* self = Thread::Current();
283 Locks::mutator_lock_->AssertNotExclusiveHeld(self);
284 Locks::thread_list_lock_->AssertNotHeld(self);
285 Locks::thread_suspend_count_lock_->AssertNotHeld(self);
292 MutexLock mu(self, *Locks::thread_list_lock_);
293 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
296 if (thread != self) {
308 thread->ModifySuspendCount(self, +1, nullptr, false);
318 checkpoint_function->Run(self);
345 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
346 thread->ModifySuspendCount(self, -1, nullptr, false);
353 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
354 Thread::resume_cond_->Broadcast(self);
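
Lines 296-354 show RunCheckpoint's handling of threads that are not runnable: their suspend count is raised (line 308) so the requester can safely do the checkpoint work while they stay parked, after which the count is lowered again (line 346) and Thread::resume_cond_ is broadcast (line 354). The sketch below shows only that raise/work/lower/broadcast bracket, with std::mutex and std::condition_variable standing in for ART's locks; MockThread, CheckSuspend, and the lock names are hypothetical.

    // Sketch of the suspend-count / resume_cond_ bracket at lines 308, 346, 354.
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct MockThread {
      int suspend_count = 0;               // guarded by suspend_count_mu
    };

    std::mutex suspend_count_mu;           // stand-in for thread_suspend_count_lock_
    std::condition_variable resume_cond;   // stand-in for Thread::resume_cond_

    // What the target thread does at a safepoint: block while suspended.
    void CheckSuspend(MockThread* t) {
      std::unique_lock<std::mutex> lk(suspend_count_mu);
      resume_cond.wait(lk, [&] { return t->suspend_count == 0; });
    }

    int main() {
      MockThread target;
      {
        std::lock_guard<std::mutex> lg(suspend_count_mu);
        target.suspend_count += 1;         // cf. thread->ModifySuspendCount(self, +1, ...)
      }
      std::thread worker([&] { CheckSuspend(&target); std::printf("target resumed\n"); });

      std::printf("doing checkpoint work while the target is parked\n");

      {
        std::lock_guard<std::mutex> lg(suspend_count_mu);
        target.suspend_count -= 1;         // cf. thread->ModifySuspendCount(self, -1, ...)
        resume_cond.notify_all();          // cf. Thread::resume_cond_->Broadcast(self)
      }
      worker.join();
      return 0;
    }
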
363 Thread* self = Thread::Current();
364 Locks::mutator_lock_->AssertNotExclusiveHeld(self);
365 Locks::thread_list_lock_->AssertNotHeld(self);
366 Locks::thread_suspend_count_lock_->AssertNotHeld(self);
367 CHECK_NE(self->GetState(), kRunnable);
372 MutexLock mu(self, *Locks::thread_list_lock_);
373 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
375 if (thread != self) {
397 Thread* self = Thread::Current();
398 Locks::mutator_lock_->AssertNotHeld(self);
399 Locks::thread_list_lock_->AssertNotHeld(self);
400 Locks::thread_suspend_count_lock_->AssertNotHeld(self);
401 CHECK_NE(self->GetState(), kRunnable);
403 SuspendAllInternal(self, self, nullptr);
406 Locks::mutator_lock_->ExclusiveLock(self);
407 flip_callback->Run(self);
408 Locks::mutator_lock_->ExclusiveUnlock(self);
415 MutexLock mu(self, *Locks::thread_list_lock_);
416 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
419 if (thread == self) {
430 thread->ModifySuspendCount(self, -1, nullptr, false);
436 Thread::resume_cond_->Broadcast(self);
441 ReaderMutexLock mu(self, *Locks::mutator_lock_);
448 // Run it for self.
449 thread_flip_visitor->Run(self);
454 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
456 thread->ModifySuspendCount(self, -1, nullptr, false);
458 Thread::resume_cond_->Broadcast(self);
461 return runnable_threads.size() + other_threads.size() + 1; // +1 for self.
465 Thread* self = Thread::Current();
467 if (self != nullptr) {
468 VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
476 SuspendAllInternal(self, self);
481 if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
491 Locks::mutator_lock_->ExclusiveLock(self);
505 AssertThreadsAreSuspended(self, self);
510 if (self != nullptr) {
511 VLOG(threads) << *self << " SuspendAll complete";
521 void ThreadList::SuspendAllInternal(Thread* self,
525 Locks::mutator_lock_->AssertNotExclusiveHeld(self);
526 Locks::thread_list_lock_->AssertNotHeld(self);
527 Locks::thread_suspend_count_lock_->AssertNotHeld(self);
528 if (kDebugLocking && self != nullptr) {
529 CHECK_NE(self->GetState(), kRunnable);
550 MutexLock mu(self, *Locks::thread_list_lock_);
551 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
564 if (LIKELY(thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend))) {
576 Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
578 Locks::thread_suspend_count_lock_->ExclusiveLock(self);
628 Thread* self = Thread::Current();
630 if (self != nullptr) {
631 VLOG(threads) << *self << " ResumeAll starting";
642 AssertThreadsAreSuspended(self, self);
647 Locks::mutator_lock_->ExclusiveUnlock(self);
649 MutexLock mu(self, *Locks::thread_list_lock_);
650 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
655 if (thread == self) {
658 thread->ModifySuspendCount(self, -1, nullptr, false);
663 if (self != nullptr) {
664 VLOG(threads) << *self << " ResumeAll waking others";
668 Thread::resume_cond_->Broadcast(self);
671 if (self != nullptr) {
672 VLOG(threads) << *self << " ResumeAll complete";
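
SuspendAll (lines 465-511) stops the world by taking Locks::mutator_lock_ exclusively once every other thread has been asked to suspend, and ResumeAll (lines 628-672) releases it, lowers the suspend counts, and broadcasts resume_cond_. As a rough analogue of the lock half of that only, the sketch below uses std::shared_mutex: mutators hold the lock shared while running, and a stop-the-world pause takes it exclusively, which can only succeed once every shared holder has released. This is an illustration of the idea, not ART's mutator_lock_.

    // Rough analogue of the SuspendAll / ResumeAll pairing with std::shared_mutex.
    #include <cstdio>
    #include <mutex>
    #include <shared_mutex>
    #include <thread>
    #include <vector>

    std::shared_mutex mutator_lock;  // stand-in for Locks::mutator_lock_

    void MutatorWork(int id) {
      for (int i = 0; i < 3; ++i) {
        std::shared_lock<std::shared_mutex> shared(mutator_lock);  // "runnable"
        std::printf("mutator %d running\n", id);
      }                                                            // released at a safepoint
    }

    int main() {
      std::vector<std::thread> mutators;
      for (int i = 0; i < 2; ++i) mutators.emplace_back(MutatorWork, i);

      {
        // cf. SuspendAll: succeeds only once no mutator holds the lock shared.
        std::unique_lock<std::shared_mutex> exclusive(mutator_lock);
        std::printf("world stopped: safe to inspect all threads\n");
      }  // cf. ResumeAll: dropping exclusive ownership lets mutators continue

      for (auto& t : mutators) t.join();
      return 0;
    }
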
682 Thread* self = Thread::Current();
683 DCHECK_NE(thread, self);
689 MutexLock mu(self, *Locks::thread_list_lock_);
691 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
700 thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
705 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
706 Thread::resume_cond_->Broadcast(self);
712 static void ThreadSuspendByPeerWarning(Thread* self,
716 JNIEnvExt* env = self->GetJniEnv();
736 Thread* const self = Thread::Current();
747 ScopedObjectAccess soa(self);
748 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
752 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
756 suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
758 ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
769 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
771 if (self->GetSuspendCount() > 0) {
779 suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
787 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
789 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
791 // count, or else we've waited and it has self suspended) or is the current thread, we're
805 ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
808 suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
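
SuspendThreadByPeer (lines 736-808) is structured as a retry loop: under thread_list_lock_ it looks the thread up, raises its suspend count on the first pass, then drops the locks and waits for the thread to actually suspend, undoing the request if the wait times out. The sketch below condenses that structure; MockThread, the parked flag, the polling sleep, and the timeout handling are hypothetical simplifications of how the real code waits for IsSuspended to become true.

    // Condensed sketch of the suspend-by-peer retry loop (lines 736-808).
    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct MockThread {
      int suspend_count = 0;               // guarded by list_mu
      std::atomic<bool> parked{false};     // set once the thread reaches a safepoint
    };

    std::mutex list_mu;                    // stand-in for thread_list_lock_

    MockThread* SuspendByPeer(MockThread* target, std::chrono::milliseconds timeout) {
      bool request_made = false;
      auto deadline = std::chrono::steady_clock::now() + timeout;
      while (true) {
        {
          std::lock_guard<std::mutex> lg(list_mu);
          if (!request_made) {
            target->suspend_count += 1;    // cf. ModifySuspendCount(self, +1, ...)
            request_made = true;
          }
          if (target->parked.load()) {
            return target;                 // suspended: safe to hand back
          }
          if (std::chrono::steady_clock::now() > deadline) {
            target->suspend_count -= 1;    // undo the request on timeout
            return nullptr;
          }
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1));  // wait and retry
      }
    }

    int main() {
      MockThread target;
      std::thread t([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
        target.parked.store(true);         // target reaches its safepoint and parks
      });
      MockThread* result = SuspendByPeer(&target, std::chrono::milliseconds(500));
      std::printf("suspended=%d\n", result != nullptr);
      t.join();
      return 0;
    }
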
842 Thread* const self = Thread::Current();
852 ScopedObjectAccess soa(self);
853 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
871 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
873 if (self->GetSuspendCount() > 0) {
879 thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
888 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
890 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
892 // count, or else we've waited and it has self suspended) or is the current thread, we're
908 thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
937 Thread* self = Thread::Current();
940 VLOG(threads) << *self << " SuspendAllForDebugger starting...";
942 SuspendAllInternal(self, self, debug_thread, true);
947 if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
950 Locks::mutator_lock_->ExclusiveUnlock(self);
953 Locks::mutator_lock_->ExclusiveLock(self);
954 Locks::mutator_lock_->ExclusiveUnlock(self);
961 // AssertThreadsAreSuspended(self, self, debug_thread);
963 VLOG(threads) << *self << " SuspendAllForDebugger complete";
967 Thread* const self = Thread::Current();
968 self->SetReadyForDebugInvoke(true);
972 CHECK(self != debug_thread);
973 CHECK_NE(self->GetState(), kRunnable);
974 Locks::mutator_lock_->AssertNotHeld(self);
978 DebugInvokeReq* pReq = self->GetInvokeReq();
984 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
985 self->ModifySuspendCount(self, +1, nullptr, true);
986 CHECK_GT(self->GetSuspendCount(), 0);
988 VLOG(threads) << *self << " self-suspending (debugger)";
991 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
992 CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";
994 VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
1000 self->ClearDebugInvokeReq();
1010 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1011 while (self->GetSuspendCount() != 0) {
1012 Thread::resume_cond_->Wait(self);
1013 if (self->GetSuspendCount() != 0) {
1020 VLOG(jdwp) << *self << " still suspended after undo "
1021 << "(suspend count=" << self->GetSuspendCount() << ", "
1022 << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
1025 CHECK_EQ(self->GetSuspendCount(), 0);
1028 self->SetReadyForDebugInvoke(false);
1029 VLOG(threads) << *self << " self-reviving (debugger)";
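
SuspendSelfForDebugger (lines 967-1029) has the thread suspend itself: it raises its own suspend count under thread_suspend_count_lock_ (line 985), then waits on Thread::resume_cond_ in a loop until the count has been brought back to zero by a resumer (lines 1011-1013). A stripped-down sketch of that wait loop, with plain std:: primitives standing in for ART's lock and condition variable and an extra notify so the resumer in main can see the count go up:

    // Sketch of the self-suspension wait loop at lines 984-1013.
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex suspend_count_mu;           // stand-in for thread_suspend_count_lock_
    std::condition_variable resume_cond;   // stand-in for Thread::resume_cond_
    int suspend_count = 0;                 // guarded by suspend_count_mu

    void SelfSuspend() {
      std::unique_lock<std::mutex> lk(suspend_count_mu);
      suspend_count += 1;                  // cf. self->ModifySuspendCount(self, +1, nullptr, true)
      resume_cond.notify_all();            // sketch only: lets main know the count is up
      std::printf("self-suspending\n");
      while (suspend_count != 0) {         // cf. while (self->GetSuspendCount() != 0)
        resume_cond.wait(lk);              // cf. Thread::resume_cond_->Wait(self)
      }
      std::printf("self-reviving\n");
    }

    int main() {
      std::thread suspender(SelfSuspend);
      {
        std::unique_lock<std::mutex> lk(suspend_count_mu);
        resume_cond.wait(lk, [] { return suspend_count > 0; });  // wait for the self-suspend
        suspend_count -= 1;                // a resumer lowers the count...
      }
      resume_cond.notify_all();            // ...and broadcasts (cf. lines 1075-1076)
      suspender.join();
      return 0;
    }
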
1033 Thread* self = Thread::Current();
1036 VLOG(threads) << *self << " ResumeAllForDebugger starting...";
1039 Locks::mutator_lock_->AssertNotExclusiveHeld(self);
1042 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
1044 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
1061 if (thread == self || thread == debug_thread) {
1069 thread->ModifySuspendCount(self, -1, nullptr, true);
1075 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1076 Thread::resume_cond_->Broadcast(self);
1079 VLOG(threads) << *self << " ResumeAllForDebugger complete";
1083 Thread* self = Thread::Current();
1085 VLOG(threads) << *self << " UndoDebuggerSuspensions starting";
1088 MutexLock mu(self, *Locks::thread_list_lock_);
1089 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1095 if (thread == self || thread->GetDebugSuspendCount() == 0) {
1098 thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
1103 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1104 Thread::resume_cond_->Broadcast(self);
1107 VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
1112 Thread* self = Thread::Current();
1113 Locks::mutator_lock_->AssertNotHeld(self);
1117 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
1121 MutexLock mu(self, *Locks::thread_list_lock_);
1128 if (thread != self && !thread->IsDaemon()) {
1138 Locks::thread_exit_cond_->Wait(self);
1144 Thread* self = Thread::Current();
1145 MutexLock mu(self, *Locks::thread_list_lock_);
1148 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1153 if (thread != self) {
1154 thread->ModifySuspendCount(self, +1, nullptr, false);
1177 if (thread != self && thread->GetState() == kRunnable) {
1193 void ThreadList::Register(Thread* self) {
1194 DCHECK_EQ(self, Thread::Current());
1198 self->ShortDump(oss); // We don't hold the mutator_lock_ yet and so cannot call Dump.
1199 LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
1202 // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
1204 MutexLock mu(self, *Locks::thread_list_lock_);
1205 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1210 self->ModifySuspendCount(self, +1, nullptr, true);
1213 self->ModifySuspendCount(self, +1, nullptr, false);
1215 CHECK(!Contains(self));
1216 list_.push_back(self);
1221 self->SetIsGcMarking(is_gc_marking);
1224 self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
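
Register (lines 1193-1224) adds the new thread to list_ while holding both the thread list and suspend count locks, and first bumps the thread's own suspend count to reflect any suspend-alls already in flight (the +1 calls at lines 1210 and 1213, with for_debugger true and false respectively), so a newly attached thread immediately honors a stop-the-world in progress. A sketch of that idea with hypothetical counters and types:

    // Sketch of the registration logic at lines 1202-1216; names are hypothetical.
    #include <list>
    #include <mutex>

    struct MockThread {
      int suspend_count = 0;
      int debug_suspend_count = 0;
    };

    struct MockThreadList {
      std::mutex list_mu;                 // stand-in for thread_list_lock_
      std::mutex suspend_count_mu;        // stand-in for thread_suspend_count_lock_
      int suspend_all_count = 0;          // outstanding SuspendAll requests
      int debug_suspend_all_count = 0;    // outstanding debugger SuspendAll requests
      std::list<MockThread*> threads;

      void Register(MockThread* self) {
        std::scoped_lock lk(list_mu, suspend_count_mu);
        // Adopt debugger suspensions first, then the remaining suspend-alls,
        // mirroring the two ModifySuspendCount(+1) calls at lines 1210 and 1213.
        for (int i = 0; i < debug_suspend_all_count; ++i) {
          self->suspend_count += 1;
          self->debug_suspend_count += 1;
        }
        for (int i = 0; i < suspend_all_count - debug_suspend_all_count; ++i) {
          self->suspend_count += 1;
        }
        threads.push_back(self);          // cf. list_.push_back(self)
      }
    };

    int main() {
      MockThreadList list;
      list.suspend_all_count = 1;         // a stop-the-world is already in progress
      MockThread self;
      list.Register(&self);
      return self.suspend_count == 1 ? 0 : 1;
    }
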
1228 void ThreadList::Unregister(Thread* self) {
1229 DCHECK_EQ(self, Thread::Current());
1230 CHECK_NE(self->GetState(), kRunnable);
1231 Locks::mutator_lock_->AssertNotHeld(self);
1233 VLOG(threads) << "ThreadList::Unregister() " << *self;
1236 MutexLock mu(self, *Locks::thread_list_lock_);
1241 // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
1245 self->Destroy();
1248 Trace::StoreExitingThreadInfo(self);
1250 uint32_t thin_lock_id = self->GetThreadId();
1254 // Note: deliberately not using MutexLock that could hold a stale self pointer.
1255 MutexLock mu(self, *Locks::thread_list_lock_);
1256 if (!Contains(self)) {
1258 self->GetThreadName(thread_name);
1264 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1265 if (!self->IsSuspended()) {
1266 list_.remove(self);
1272 delete self;
1284 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
1306 uint32_t ThreadList::AllocThreadId(Thread* self) {
1307 MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
1318 void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
1319 MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
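
AllocThreadId and ReleaseThreadId (lines 1306-1319) hand out the thin-lock thread ids under Locks::allocated_thread_ids_lock_. Below is a small allocator in the same spirit: a bitset of in-use ids behind a mutex, with 1-based ids so that 0 can mean "no id". The size limit and names here are made up.

    // Sketch of an id allocator in the spirit of AllocThreadId/ReleaseThreadId.
    #include <bitset>
    #include <cstdint>
    #include <mutex>
    #include <stdexcept>

    class ThreadIdAllocator {
     public:
      uint32_t Alloc() {
        std::lock_guard<std::mutex> lg(mu_);   // cf. allocated_thread_ids_lock_
        for (size_t i = 0; i < kMaxIds; ++i) {
          if (!in_use_[i]) {
            in_use_[i] = true;
            return static_cast<uint32_t>(i) + 1;  // ids are 1-based
          }
        }
        throw std::runtime_error("out of thread ids");
      }

      void Release(uint32_t id) {
        std::lock_guard<std::mutex> lg(mu_);
        --id;                                   // back to a 0-based index
        if (id >= kMaxIds || !in_use_[id]) {
          throw std::runtime_error("releasing an unallocated id");
        }
        in_use_[id] = false;
      }

     private:
      static constexpr size_t kMaxIds = 65535;
      std::mutex mu_;
      std::bitset<kMaxIds> in_use_;
    };

    int main() {
      ThreadIdAllocator ids;
      uint32_t a = ids.Alloc();   // 1
      uint32_t b = ids.Alloc();   // 2
      ids.Release(a);             // freed ids are reused by the next Alloc
      return (b == 2 && ids.Alloc() == 1) ? 0 : 1;
    }
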