/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#include "base/aborting.h"
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "jni_env_ext.h"
#include "managed_stack-inl.h"
#include "obj_ptr.h"
#include "thread-current-inl.h"
#include "thread_pool.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->GetSelf();
}

inline void Thread::AllowThreadSuspension() {
  DCHECK_EQ(Thread::Current(), this);
  if (UNLIKELY(TestAllFlags())) {
    CheckSuspend();
  }
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

inline void Thread::CheckSuspend() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (ReadFlag(kSuspendRequest)) {
      FullSuspendCheck();
    } else if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only expected mutexes when accessing a weak ref.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr &&
              held_mutex != Locks::mutator_lock_ &&
              held_mutex != cond_var_mutex) {
            CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}
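
// Usage sketch (hypothetical caller, not part of this file): long-running runtime loops call
// AllowThreadSuspension() periodically so that pending checkpoints and suspend requests (e.g.
// for GC) are serviced promptly:
//
//   void VisitLotsOfObjects(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
//     for (/* each object */) {
//       self->AllowThreadSuspension();  // May run checkpoints or fully suspend here.
//       // ... visit one object ...
//     }
//   }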

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot use this code to change into or from Runnable as changing to Runnable should
  // fail if old_state_and_flags.suspend_request is true and changing from Runnable might
  // miss passing an active suspend barrier.
  DCHECK_NE(new_state, kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable);
  tls32_.state_and_flags.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}

inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock &&
        i != kUserCodeSuspensionLock &&
        GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  // Thread safety analysis cannot understand that GetHeldMutex(...) or AssertHeld means we
  // hold the mutex, so we need this hack.
  auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
    return tls32_.user_code_suspend_count != 0;
  };
  if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
    return false;
  }
  return true;
}

inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator_lock_. The user code suspension lock is OK as
        // long as we aren't going to be held suspended due to SuspendReason::kForUserCode.
        if (i != kMutatorLock && i != kUserCodeSuspensionLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      // Make sure that if we hold the user_code_suspension_lock_ we aren't suspending due to
      // user_code_suspend_count, which would prevent the thread from ever waking up. Thread
      // safety analysis cannot understand that GetHeldMutex(...) or AssertHeld means we hold
      // the mutex, so we need this hack.
      auto is_suspending_for_user_code = [this]() NO_THREAD_SAFETY_ANALYSIS {
        return tls32_.user_code_suspend_count != 0;
      };
      if (GetHeldMutex(kUserCodeSuspensionLock) != nullptr && is_suspending_for_user_code()) {
        LOG(ERROR) << "suspending due to user-code while holding \""
                   << Locks::user_code_suspension_lock_->GetName() << "\"! Thread would never "
                   << "wake up.";
        bad_mutexes_held = true;
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}
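
// Usage sketch (hypothetical, for illustration only): SetState() is only for moving between two
// already-suspended states. A thread that is blocked outside Java code might refine why it is
// waiting and later restore the previous state:
//
//   DCHECK_NE(self->GetState(), kRunnable);
//   ThreadState old_state = self->SetState(kWaiting);  // Still suspended, new wait reason.
//   // ... block without holding a share of the mutator_lock_ ...
//   self->SetState(old_state);                         // Restore the prior suspended state.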

inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  while (true) {
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
    new_state_and_flags.as_struct.state = new_state;

    // CAS the value with release memory ordering.
    bool done =
        tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakRelease(old_state_and_flags.as_int,
                                                                      new_state_and_flags.as_int);
    if (LIKELY(done)) {
      break;
    }
  }
}

inline void Thread::PassActiveSuspendBarriers() {
  while (true) {
    uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
    if (LIKELY((current_flags &
                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
      break;
    } else if ((current_flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else {
      // Impossible
      LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
    }
  }
}

inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator_lock_.
  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
  // Once suspended, check the active suspend barrier flag.
  PassActiveSuspendBarriers();
}
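
// These transitions are normally paired RAII-style: ScopedThreadSuspension (declared in
// scoped_thread_state_change.h) runs TransitionFromRunnableToSuspended() in its constructor and
// TransitionFromSuspendedToRunnable() in its destructor. A minimal sketch, assuming a
// hypothetical blocking helper DoBlockingWork():
//
//   {
//     ScopedThreadSuspension sts(self, kWaitingForCheckPointsToRun);
//     DoBlockingWork();  // Runs without a share of the mutator_lock_; GC may proceed.
//   }  // Destructor transitions back to runnable, waiting out any pending suspend request.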

inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY(old_state_and_flags.as_struct.flags == 0)) {
      // Optimize for the return from native code case - this is the fast path.
      // Atomically change from suspended to runnable if no suspend request pending.
      union StateAndFlags new_state_and_flags;
      new_state_and_flags.as_int = old_state_and_flags.as_int;
      new_state_and_flags.as_struct.state = kRunnable;

      // CAS the value with acquire memory ordering.
      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire(
              old_state_and_flags.as_int,
              new_state_and_flags.as_int))) {
        // Mark the acquisition of a share of the mutator_lock_.
        Locks::mutator_lock_->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else if ((old_state_and_flags.as_struct.flags &
                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
      // Impossible
      LOG(FATAL) << "Transitioning to runnable with checkpoint flag,"
                 << " flags=" << old_state_and_flags.as_struct.flags
                 << " state=" << old_state_and_flags.as_struct.state;
    } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // requires taking the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
      old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
  } while (true);
  // Run the flip function, if set.
  Closure* flip_func = GetFlipFunction();
  if (flip_func != nullptr) {
    flip_func->Run(this);
  }
  return static_cast<ThreadState>(old_state);
}

inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}
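
// AllocTlab() is a plain bump-pointer allocation with no locking; callers must check the
// remaining TLAB space first, since the DCHECK above only fires in debug builds. A hypothetical
// caller sketch:
//
//   if (LIKELY(self->TlabSize() >= byte_count)) {
//     mirror::Object* obj = self->AllocTlab(byte_count);  // Thread-local, no atomics needed.
//     // ... initialize obj ...
//   } else {
//     // Fall back to a slower path that refills the TLAB or allocates in a shared region.
//   }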

inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}

inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since this thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}

inline bool Thread::ModifySuspendCount(Thread* self,
                                       int delta,
                                       AtomicInteger* suspend_barrier,
                                       SuspendReason reason) {
  if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
    // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
    // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
    while (true) {
      if (LIKELY(ModifySuspendCountInternal(self, delta, suspend_barrier, reason))) {
        return true;
      } else {
        // Failure means the list of active_suspend_barriers is full or we are in the middle of a
        // thread flip, so we should release the thread_suspend_count_lock_ (to avoid deadlock)
        // and wait until the target thread has executed Thread::PassActiveSuspendBarriers() or
        // the flip function. Note that we cannot simply wait for the thread to change to a
        // suspended state, because it might need to run a checkpoint function before the state
        // change or resume from resume_cond_, which also needs the thread_suspend_count_lock_.
        //
        // The list of active_suspend_barriers is very unlikely to be full since more than
        // kMaxSuspendBarriers threads would need to execute SuspendAllInternal() simultaneously,
        // with the target thread staying in kRunnable in the meantime.
        Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
        NanoSleep(100000);
        Locks::thread_suspend_count_lock_->ExclusiveLock(self);
      }
    }
  } else {
    return ModifySuspendCountInternal(self, delta, suspend_barrier, reason);
  }
}

inline ShadowFrame* Thread::PushShadowFrame(ShadowFrame* new_top_frame) {
  return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
}

inline ShadowFrame* Thread::PopShadowFrame() {
  return tlsPtr_.managed_stack.PopShadowFrame();
}

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_