1 /* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "mutex.h" 18 19 #include <errno.h> 20 #include <sys/time.h> 21 22 #include "android-base/stringprintf.h" 23 24 #include "base/atomic.h" 25 #include "base/logging.h" 26 #include "base/systrace.h" 27 #include "base/time_utils.h" 28 #include "base/value_object.h" 29 #include "mutex-inl.h" 30 #include "scoped_thread_state_change-inl.h" 31 #include "thread-inl.h" 32 33 namespace art { 34 35 using android::base::StringPrintf; 36 37 struct AllMutexData { 38 // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). 39 Atomic<const BaseMutex*> all_mutexes_guard; 40 // All created mutexes guarded by all_mutexes_guard_. 41 std::set<BaseMutex*>* all_mutexes; 42 AllMutexData() : all_mutexes(nullptr) {} 43 }; 44 static struct AllMutexData gAllMutexData[kAllMutexDataSize]; 45 46 #if ART_USE_FUTEXES 47 static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) { 48 const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds. 49 result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec; 50 result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec; 51 if (result_ts->tv_nsec < 0) { 52 result_ts->tv_sec--; 53 result_ts->tv_nsec += one_sec; 54 } else if (result_ts->tv_nsec > one_sec) { 55 result_ts->tv_sec++; 56 result_ts->tv_nsec -= one_sec; 57 } 58 return result_ts->tv_sec < 0; 59 } 60 #endif 61 62 // Wait for an amount of time that roughly increases in the argument i. 63 // Spin for small arguments and yield/sleep for longer ones. 64 static void BackOff(uint32_t i) { 65 static constexpr uint32_t kSpinMax = 10; 66 static constexpr uint32_t kYieldMax = 20; 67 if (i <= kSpinMax) { 68 // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit 69 // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor. 70 volatile uint32_t x = 0; 71 const uint32_t spin_count = 10 * i; 72 for (uint32_t spin = 0; spin < spin_count; ++spin) { 73 ++x; // Volatile; hence should not be optimized away. 74 } 75 // TODO: Consider adding x86 PAUSE and/or ARM YIELD here. 76 } else if (i <= kYieldMax) { 77 sched_yield(); 78 } else { 79 NanoSleep(1000ull * (i - kYieldMax)); 80 } 81 } 82 83 class ScopedAllMutexesLock final { 84 public: 85 explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) { 86 for (uint32_t i = 0; 87 !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex); 88 ++i) { 89 BackOff(i); 90 } 91 } 92 93 ~ScopedAllMutexesLock() { 94 DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_); 95 gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release); 96 } 97 98 private: 99 const BaseMutex* const mutex_; 100 }; 101 102 // Scoped class that generates events at the beginning and end of lock contention. 103 class ScopedContentionRecorder final : public ValueObject { 104 public: 105 ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid) 106 : mutex_(kLogLockContentions ? mutex : nullptr), 107 blocked_tid_(kLogLockContentions ? blocked_tid : 0), 108 owner_tid_(kLogLockContentions ? owner_tid : 0), 109 start_nano_time_(kLogLockContentions ? NanoTime() : 0) { 110 if (ATraceEnabled()) { 111 std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")", 112 mutex->GetName(), owner_tid); 113 ATraceBegin(msg.c_str()); 114 } 115 } 116 117 ~ScopedContentionRecorder() { 118 ATraceEnd(); 119 if (kLogLockContentions) { 120 uint64_t end_nano_time = NanoTime(); 121 mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_); 122 } 123 } 124 125 private: 126 BaseMutex* const mutex_; 127 const uint64_t blocked_tid_; 128 const uint64_t owner_tid_; 129 const uint64_t start_nano_time_; 130 }; 131 132 BaseMutex::BaseMutex(const char* name, LockLevel level) 133 : name_(name), 134 level_(level), 135 should_respond_to_empty_checkpoint_request_(false) { 136 if (kLogLockContentions) { 137 ScopedAllMutexesLock mu(this); 138 std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes; 139 if (*all_mutexes_ptr == nullptr) { 140 // We leak the global set of all mutexes to avoid ordering issues in global variable 141 // construction/destruction. 142 *all_mutexes_ptr = new std::set<BaseMutex*>(); 143 } 144 (*all_mutexes_ptr)->insert(this); 145 } 146 } 147 148 BaseMutex::~BaseMutex() { 149 if (kLogLockContentions) { 150 ScopedAllMutexesLock mu(this); 151 gAllMutexData->all_mutexes->erase(this); 152 } 153 } 154 155 void BaseMutex::DumpAll(std::ostream& os) { 156 if (kLogLockContentions) { 157 os << "Mutex logging:\n"; 158 ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1)); 159 std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes; 160 if (all_mutexes == nullptr) { 161 // No mutexes have been created yet during at startup. 162 return; 163 } 164 os << "(Contended)\n"; 165 for (const BaseMutex* mutex : *all_mutexes) { 166 if (mutex->HasEverContended()) { 167 mutex->Dump(os); 168 os << "\n"; 169 } 170 } 171 os << "(Never contented)\n"; 172 for (const BaseMutex* mutex : *all_mutexes) { 173 if (!mutex->HasEverContended()) { 174 mutex->Dump(os); 175 os << "\n"; 176 } 177 } 178 } 179 } 180 181 void BaseMutex::CheckSafeToWait(Thread* self) { 182 if (self == nullptr) { 183 CheckUnattachedThread(level_); 184 return; 185 } 186 if (kDebugLocking) { 187 CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock) 188 << "Waiting on unacquired mutex: " << name_; 189 bool bad_mutexes_held = false; 190 for (int i = kLockLevelCount - 1; i >= 0; --i) { 191 if (i != level_) { 192 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i)); 193 // We allow the thread to wait even if the user_code_suspension_lock_ is held so long. This 194 // just means that gc or some other internal process is suspending the thread while it is 195 // trying to suspend some other thread. So long as the current thread is not being suspended 196 // by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_ to clear) 197 // this is fine. This is needed due to user_code_suspension_lock_ being the way untrusted 198 // code interacts with suspension. One holds the lock to prevent user-code-suspension from 199 // occurring. Since this is only initiated from user-supplied native-code this is safe. 200 if (held_mutex == Locks::user_code_suspension_lock_) { 201 // No thread safety analysis is fine since we have both the user_code_suspension_lock_ 202 // from the line above and the ThreadSuspendCountLock since it is our level_. We use this 203 // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS. 204 auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS { 205 return self->GetUserCodeSuspendCount() != 0; 206 }; 207 if (is_suspending_for_user_code()) { 208 LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" " 209 << "(level " << LockLevel(i) << ") while performing wait on " 210 << "\"" << name_ << "\" (level " << level_ << ") " 211 << "with SuspendReason::kForUserCode pending suspensions"; 212 bad_mutexes_held = true; 213 } 214 } else if (held_mutex != nullptr) { 215 LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" " 216 << "(level " << LockLevel(i) << ") while performing wait on " 217 << "\"" << name_ << "\" (level " << level_ << ")"; 218 bad_mutexes_held = true; 219 } 220 } 221 } 222 if (gAborting == 0) { // Avoid recursive aborts. 223 CHECK(!bad_mutexes_held) << this; 224 } 225 } 226 } 227 228 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) { 229 if (kLogLockContentions) { 230 // Atomically add value to wait_time. 231 wait_time.fetch_add(value, std::memory_order_seq_cst); 232 } 233 } 234 235 void BaseMutex::RecordContention(uint64_t blocked_tid, 236 uint64_t owner_tid, 237 uint64_t nano_time_blocked) { 238 if (kLogLockContentions) { 239 ContentionLogData* data = contention_log_data_; 240 ++(data->contention_count); 241 data->AddToWaitTime(nano_time_blocked); 242 ContentionLogEntry* log = data->contention_log; 243 // This code is intentionally racy as it is only used for diagnostics. 244 int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed); 245 if (log[slot].blocked_tid == blocked_tid && 246 log[slot].owner_tid == blocked_tid) { 247 ++log[slot].count; 248 } else { 249 uint32_t new_slot; 250 do { 251 slot = data->cur_content_log_entry.load(std::memory_order_relaxed); 252 new_slot = (slot + 1) % kContentionLogSize; 253 } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot)); 254 log[new_slot].blocked_tid = blocked_tid; 255 log[new_slot].owner_tid = owner_tid; 256 log[new_slot].count.store(1, std::memory_order_relaxed); 257 } 258 } 259 } 260 261 void BaseMutex::DumpContention(std::ostream& os) const { 262 if (kLogLockContentions) { 263 const ContentionLogData* data = contention_log_data_; 264 const ContentionLogEntry* log = data->contention_log; 265 uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed); 266 uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed); 267 if (contention_count == 0) { 268 os << "never contended"; 269 } else { 270 os << "contended " << contention_count 271 << " total wait of contender " << PrettyDuration(wait_time) 272 << " average " << PrettyDuration(wait_time / contention_count); 273 SafeMap<uint64_t, size_t> most_common_blocker; 274 SafeMap<uint64_t, size_t> most_common_blocked; 275 for (size_t i = 0; i < kContentionLogSize; ++i) { 276 uint64_t blocked_tid = log[i].blocked_tid; 277 uint64_t owner_tid = log[i].owner_tid; 278 uint32_t count = log[i].count.load(std::memory_order_relaxed); 279 if (count > 0) { 280 auto it = most_common_blocked.find(blocked_tid); 281 if (it != most_common_blocked.end()) { 282 most_common_blocked.Overwrite(blocked_tid, it->second + count); 283 } else { 284 most_common_blocked.Put(blocked_tid, count); 285 } 286 it = most_common_blocker.find(owner_tid); 287 if (it != most_common_blocker.end()) { 288 most_common_blocker.Overwrite(owner_tid, it->second + count); 289 } else { 290 most_common_blocker.Put(owner_tid, count); 291 } 292 } 293 } 294 uint64_t max_tid = 0; 295 size_t max_tid_count = 0; 296 for (const auto& pair : most_common_blocked) { 297 if (pair.second > max_tid_count) { 298 max_tid = pair.first; 299 max_tid_count = pair.second; 300 } 301 } 302 if (max_tid != 0) { 303 os << " sample shows most blocked tid=" << max_tid; 304 } 305 max_tid = 0; 306 max_tid_count = 0; 307 for (const auto& pair : most_common_blocker) { 308 if (pair.second > max_tid_count) { 309 max_tid = pair.first; 310 max_tid_count = pair.second; 311 } 312 } 313 if (max_tid != 0) { 314 os << " sample shows tid=" << max_tid << " owning during this time"; 315 } 316 } 317 } 318 } 319 320 321 Mutex::Mutex(const char* name, LockLevel level, bool recursive) 322 : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) { 323 #if ART_USE_FUTEXES 324 DCHECK_EQ(0, state_and_contenders_.load(std::memory_order_relaxed)); 325 #else 326 CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr)); 327 #endif 328 } 329 330 // Helper to allow checking shutdown while locking for thread safety. 331 static bool IsSafeToCallAbortSafe() { 332 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_); 333 return Locks::IsSafeToCallAbortRacy(); 334 } 335 336 Mutex::~Mutex() { 337 bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy(); 338 #if ART_USE_FUTEXES 339 if (state_and_contenders_.load(std::memory_order_relaxed) != 0) { 340 LOG(safe_to_call_abort ? FATAL : WARNING) 341 << "destroying mutex with owner or contenders. Owner:" << GetExclusiveOwnerTid(); 342 } else { 343 if (GetExclusiveOwnerTid() != 0) { 344 LOG(safe_to_call_abort ? FATAL : WARNING) 345 << "unexpectedly found an owner on unlocked mutex " << name_; 346 } 347 } 348 #else 349 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 350 // may still be using locks. 351 int rc = pthread_mutex_destroy(&mutex_); 352 if (rc != 0) { 353 errno = rc; 354 PLOG(safe_to_call_abort ? FATAL : WARNING) 355 << "pthread_mutex_destroy failed for " << name_; 356 } 357 #endif 358 } 359 360 void Mutex::ExclusiveLock(Thread* self) { 361 DCHECK(self == nullptr || self == Thread::Current()); 362 if (kDebugLocking && !recursive_) { 363 AssertNotHeld(self); 364 } 365 if (!recursive_ || !IsExclusiveHeld(self)) { 366 #if ART_USE_FUTEXES 367 bool done = false; 368 do { 369 int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed); 370 if (LIKELY((cur_state & kHeldMask) == 0) /* lock not held */) { 371 done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask); 372 } else { 373 // Failed to acquire, hang up. 374 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 375 // Increment contender count. We can't create enough threads for this to overflow. 376 increment_contenders(); 377 // Make cur_state again reflect the expected value of state_and_contenders. 378 cur_state += kContenderIncrement; 379 if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { 380 self->CheckEmptyCheckpointFromMutex(); 381 } 382 if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state, 383 nullptr, nullptr, 0) != 0) { 384 // We only went to sleep after incrementing and contenders and checking that the lock 385 // is still held by someone else. 386 // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. 387 // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. 388 if ((errno != EAGAIN) && (errno != EINTR)) { 389 PLOG(FATAL) << "futex wait failed for " << name_; 390 } 391 } 392 decrement_contenders(); 393 } 394 } while (!done); 395 // Confirm that lock is now held. 396 DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0); 397 #else 398 CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_)); 399 #endif 400 DCHECK_EQ(GetExclusiveOwnerTid(), 0); 401 exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); 402 RegisterAsLocked(self); 403 } 404 recursion_count_++; 405 if (kDebugLocking) { 406 CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: " 407 << name_ << " " << recursion_count_; 408 AssertHeld(self); 409 } 410 } 411 412 bool Mutex::ExclusiveTryLock(Thread* self) { 413 DCHECK(self == nullptr || self == Thread::Current()); 414 if (kDebugLocking && !recursive_) { 415 AssertNotHeld(self); 416 } 417 if (!recursive_ || !IsExclusiveHeld(self)) { 418 #if ART_USE_FUTEXES 419 bool done = false; 420 do { 421 int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed); 422 if ((cur_state & kHeldMask) == 0) { 423 // Change state to held and impose load/store ordering appropriate for lock acquisition. 424 done = state_and_contenders_.CompareAndSetWeakAcquire(cur_state, cur_state | kHeldMask); 425 } else { 426 return false; 427 } 428 } while (!done); 429 DCHECK_NE(state_and_contenders_.load(std::memory_order_relaxed) & kHeldMask, 0); 430 #else 431 int result = pthread_mutex_trylock(&mutex_); 432 if (result == EBUSY) { 433 return false; 434 } 435 if (result != 0) { 436 errno = result; 437 PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; 438 } 439 #endif 440 DCHECK_EQ(GetExclusiveOwnerTid(), 0); 441 exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); 442 RegisterAsLocked(self); 443 } 444 recursion_count_++; 445 if (kDebugLocking) { 446 CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: " 447 << name_ << " " << recursion_count_; 448 AssertHeld(self); 449 } 450 return true; 451 } 452 453 void Mutex::ExclusiveUnlock(Thread* self) { 454 if (kIsDebugBuild && self != nullptr && self != Thread::Current()) { 455 std::string name1 = "<null>"; 456 std::string name2 = "<null>"; 457 if (self != nullptr) { 458 self->GetThreadName(name1); 459 } 460 if (Thread::Current() != nullptr) { 461 Thread::Current()->GetThreadName(name2); 462 } 463 LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1 464 << " Thread::Current()=" << name2; 465 } 466 AssertHeld(self); 467 DCHECK_NE(GetExclusiveOwnerTid(), 0); 468 recursion_count_--; 469 if (!recursive_ || recursion_count_ == 0) { 470 if (kDebugLocking) { 471 CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: " 472 << name_ << " " << recursion_count_; 473 } 474 RegisterAsUnlocked(self); 475 #if ART_USE_FUTEXES 476 bool done = false; 477 do { 478 int32_t cur_state = state_and_contenders_.load(std::memory_order_relaxed); 479 if (LIKELY((cur_state & kHeldMask) != 0)) { 480 // We're no longer the owner. 481 exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 482 // Change state to not held and impose load/store ordering appropriate for lock release. 483 uint32_t new_state = cur_state & ~kHeldMask; // Same number of contenders. 484 done = state_and_contenders_.CompareAndSetWeakRelease(cur_state, new_state); 485 if (LIKELY(done)) { // Spurious fail or waiters changed ? 486 if (UNLIKELY(new_state != 0) /* have contenders */) { 487 futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeOne, 488 nullptr, nullptr, 0); 489 } 490 // We only do a futex wait after incrementing contenders and verifying the lock was 491 // still held. If we didn't see waiters, then there couldn't have been any futexes 492 // waiting on this lock when we did the CAS. New arrivals after that cannot wait for us, 493 // since the futex wait call would see the lock available and immediately return. 494 } 495 } else { 496 // Logging acquires the logging lock, avoid infinite recursion in that case. 497 if (this != Locks::logging_lock_) { 498 LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_; 499 } else { 500 LogHelper::LogLineLowStack(__FILE__, 501 __LINE__, 502 ::android::base::FATAL_WITHOUT_ABORT, 503 StringPrintf("Unexpected state_ %d in unlock for %s", 504 cur_state, name_).c_str()); 505 _exit(1); 506 } 507 } 508 } while (!done); 509 #else 510 exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 511 CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); 512 #endif 513 } 514 } 515 516 void Mutex::Dump(std::ostream& os) const { 517 os << (recursive_ ? "recursive " : "non-recursive ") 518 << name_ 519 << " level=" << static_cast<int>(level_) 520 << " rec=" << recursion_count_ 521 << " owner=" << GetExclusiveOwnerTid() << " "; 522 DumpContention(os); 523 } 524 525 std::ostream& operator<<(std::ostream& os, const Mutex& mu) { 526 mu.Dump(os); 527 return os; 528 } 529 530 void Mutex::WakeupToRespondToEmptyCheckpoint() { 531 #if ART_USE_FUTEXES 532 // Wake up all the waiters so they will respond to the emtpy checkpoint. 533 DCHECK(should_respond_to_empty_checkpoint_request_); 534 if (UNLIKELY(get_contenders() != 0)) { 535 futex(state_and_contenders_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0); 536 } 537 #else 538 LOG(FATAL) << "Non futex case isn't supported."; 539 #endif 540 } 541 542 ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) 543 : BaseMutex(name, level) 544 #if ART_USE_FUTEXES 545 , state_(0), num_pending_readers_(0), num_pending_writers_(0) 546 #endif 547 { 548 #if !ART_USE_FUTEXES 549 CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr)); 550 #endif 551 exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 552 } 553 554 ReaderWriterMutex::~ReaderWriterMutex() { 555 #if ART_USE_FUTEXES 556 CHECK_EQ(state_.load(std::memory_order_relaxed), 0); 557 CHECK_EQ(GetExclusiveOwnerTid(), 0); 558 CHECK_EQ(num_pending_readers_.load(std::memory_order_relaxed), 0); 559 CHECK_EQ(num_pending_writers_.load(std::memory_order_relaxed), 0); 560 #else 561 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 562 // may still be using locks. 563 int rc = pthread_rwlock_destroy(&rwlock_); 564 if (rc != 0) { 565 errno = rc; 566 bool is_safe_to_call_abort = IsSafeToCallAbortSafe(); 567 PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_; 568 } 569 #endif 570 } 571 572 void ReaderWriterMutex::ExclusiveLock(Thread* self) { 573 DCHECK(self == nullptr || self == Thread::Current()); 574 AssertNotExclusiveHeld(self); 575 #if ART_USE_FUTEXES 576 bool done = false; 577 do { 578 int32_t cur_state = state_.load(std::memory_order_relaxed); 579 if (LIKELY(cur_state == 0)) { 580 // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. 581 done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */); 582 } else { 583 // Failed to acquire, hang up. 584 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 585 ++num_pending_writers_; 586 if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { 587 self->CheckEmptyCheckpointFromMutex(); 588 } 589 if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { 590 // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. 591 // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. 592 if ((errno != EAGAIN) && (errno != EINTR)) { 593 PLOG(FATAL) << "futex wait failed for " << name_; 594 } 595 } 596 --num_pending_writers_; 597 } 598 } while (!done); 599 DCHECK_EQ(state_.load(std::memory_order_relaxed), -1); 600 #else 601 CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); 602 #endif 603 DCHECK_EQ(GetExclusiveOwnerTid(), 0); 604 exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); 605 RegisterAsLocked(self); 606 AssertExclusiveHeld(self); 607 } 608 609 void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { 610 DCHECK(self == nullptr || self == Thread::Current()); 611 AssertExclusiveHeld(self); 612 RegisterAsUnlocked(self); 613 DCHECK_NE(GetExclusiveOwnerTid(), 0); 614 #if ART_USE_FUTEXES 615 bool done = false; 616 do { 617 int32_t cur_state = state_.load(std::memory_order_relaxed); 618 if (LIKELY(cur_state == -1)) { 619 // We're no longer the owner. 620 exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 621 // Change state from -1 to 0 and impose load/store ordering appropriate for lock release. 622 // Note, the relaxed loads below musn't reorder before the CompareAndSet. 623 // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing 624 // a status bit into the state on contention. 625 done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */); 626 if (LIKELY(done)) { // Weak CAS may fail spuriously. 627 // Wake any waiters. 628 if (UNLIKELY(num_pending_readers_.load(std::memory_order_seq_cst) > 0 || 629 num_pending_writers_.load(std::memory_order_seq_cst) > 0)) { 630 futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0); 631 } 632 } 633 } else { 634 LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; 635 } 636 } while (!done); 637 #else 638 exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 639 CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); 640 #endif 641 } 642 643 #if HAVE_TIMED_RWLOCK 644 bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) { 645 DCHECK(self == nullptr || self == Thread::Current()); 646 #if ART_USE_FUTEXES 647 bool done = false; 648 timespec end_abs_ts; 649 InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts); 650 do { 651 int32_t cur_state = state_.load(std::memory_order_relaxed); 652 if (cur_state == 0) { 653 // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. 654 done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */); 655 } else { 656 // Failed to acquire, hang up. 657 timespec now_abs_ts; 658 InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts); 659 timespec rel_ts; 660 if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) { 661 return false; // Timed out. 662 } 663 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 664 ++num_pending_writers_; 665 if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { 666 self->CheckEmptyCheckpointFromMutex(); 667 } 668 if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) { 669 if (errno == ETIMEDOUT) { 670 --num_pending_writers_; 671 return false; // Timed out. 672 } else if ((errno != EAGAIN) && (errno != EINTR)) { 673 // EAGAIN and EINTR both indicate a spurious failure, 674 // recompute the relative time out from now and try again. 675 // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts; 676 PLOG(FATAL) << "timed futex wait failed for " << name_; 677 } 678 } 679 --num_pending_writers_; 680 } 681 } while (!done); 682 #else 683 timespec ts; 684 InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts); 685 int result = pthread_rwlock_timedwrlock(&rwlock_, &ts); 686 if (result == ETIMEDOUT) { 687 return false; 688 } 689 if (result != 0) { 690 errno = result; 691 PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; 692 } 693 #endif 694 exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); 695 RegisterAsLocked(self); 696 AssertSharedHeld(self); 697 return true; 698 } 699 #endif 700 701 #if ART_USE_FUTEXES 702 void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) { 703 // Owner holds it exclusively, hang up. 704 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 705 ++num_pending_readers_; 706 if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { 707 self->CheckEmptyCheckpointFromMutex(); 708 } 709 if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { 710 if (errno != EAGAIN && errno != EINTR) { 711 PLOG(FATAL) << "futex wait failed for " << name_; 712 } 713 } 714 --num_pending_readers_; 715 } 716 #endif 717 718 bool ReaderWriterMutex::SharedTryLock(Thread* self) { 719 DCHECK(self == nullptr || self == Thread::Current()); 720 #if ART_USE_FUTEXES 721 bool done = false; 722 do { 723 int32_t cur_state = state_.load(std::memory_order_relaxed); 724 if (cur_state >= 0) { 725 // Add as an extra reader and impose load/store ordering appropriate for lock acquisition. 726 done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1); 727 } else { 728 // Owner holds it exclusively. 729 return false; 730 } 731 } while (!done); 732 #else 733 int result = pthread_rwlock_tryrdlock(&rwlock_); 734 if (result == EBUSY) { 735 return false; 736 } 737 if (result != 0) { 738 errno = result; 739 PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; 740 } 741 #endif 742 RegisterAsLocked(self); 743 AssertSharedHeld(self); 744 return true; 745 } 746 747 bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const { 748 DCHECK(self == nullptr || self == Thread::Current()); 749 bool result; 750 if (UNLIKELY(self == nullptr)) { // Handle unattached threads. 751 result = IsExclusiveHeld(self); // TODO: a better best effort here. 752 } else { 753 result = (self->GetHeldMutex(level_) == this); 754 } 755 return result; 756 } 757 758 void ReaderWriterMutex::Dump(std::ostream& os) const { 759 os << name_ 760 << " level=" << static_cast<int>(level_) 761 << " owner=" << GetExclusiveOwnerTid() 762 #if ART_USE_FUTEXES 763 << " state=" << state_.load(std::memory_order_seq_cst) 764 << " num_pending_writers=" << num_pending_writers_.load(std::memory_order_seq_cst) 765 << " num_pending_readers=" << num_pending_readers_.load(std::memory_order_seq_cst) 766 #endif 767 << " "; 768 DumpContention(os); 769 } 770 771 std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) { 772 mu.Dump(os); 773 return os; 774 } 775 776 std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) { 777 mu.Dump(os); 778 return os; 779 } 780 781 void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() { 782 #if ART_USE_FUTEXES 783 // Wake up all the waiters so they will respond to the emtpy checkpoint. 784 DCHECK(should_respond_to_empty_checkpoint_request_); 785 if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 || 786 num_pending_writers_.load(std::memory_order_relaxed) > 0)) { 787 futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0); 788 } 789 #else 790 LOG(FATAL) << "Non futex case isn't supported."; 791 #endif 792 } 793 794 ConditionVariable::ConditionVariable(const char* name, Mutex& guard) 795 : name_(name), guard_(guard) { 796 #if ART_USE_FUTEXES 797 DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed)); 798 num_waiters_ = 0; 799 #else 800 pthread_condattr_t cond_attrs; 801 CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs)); 802 #if !defined(__APPLE__) 803 // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock. 804 CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC)); 805 #endif 806 CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs)); 807 #endif 808 } 809 810 ConditionVariable::~ConditionVariable() { 811 #if ART_USE_FUTEXES 812 if (num_waiters_!= 0) { 813 bool is_safe_to_call_abort = IsSafeToCallAbortSafe(); 814 LOG(is_safe_to_call_abort ? FATAL : WARNING) 815 << "ConditionVariable::~ConditionVariable for " << name_ 816 << " called with " << num_waiters_ << " waiters."; 817 } 818 #else 819 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 820 // may still be using condition variables. 821 int rc = pthread_cond_destroy(&cond_); 822 if (rc != 0) { 823 errno = rc; 824 bool is_safe_to_call_abort = IsSafeToCallAbortSafe(); 825 PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_; 826 } 827 #endif 828 } 829 830 void ConditionVariable::Broadcast(Thread* self) { 831 DCHECK(self == nullptr || self == Thread::Current()); 832 // TODO: enable below, there's a race in thread creation that causes false failures currently. 833 // guard_.AssertExclusiveHeld(self); 834 DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self)); 835 #if ART_USE_FUTEXES 836 RequeueWaiters(std::numeric_limits<int32_t>::max()); 837 #else 838 CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_)); 839 #endif 840 } 841 842 #if ART_USE_FUTEXES 843 void ConditionVariable::RequeueWaiters(int32_t count) { 844 if (num_waiters_ > 0) { 845 sequence_++; // Indicate a signal occurred. 846 // Move waiters from the condition variable's futex to the guard's futex, 847 // so that they will be woken up when the mutex is released. 848 bool done = futex(sequence_.Address(), 849 FUTEX_REQUEUE_PRIVATE, 850 /* Threads to wake */ 0, 851 /* Threads to requeue*/ reinterpret_cast<const timespec*>(count), 852 guard_.state_and_contenders_.Address(), 853 0) != -1; 854 if (!done && errno != EAGAIN && errno != EINTR) { 855 PLOG(FATAL) << "futex requeue failed for " << name_; 856 } 857 } 858 } 859 #endif 860 861 862 void ConditionVariable::Signal(Thread* self) { 863 DCHECK(self == nullptr || self == Thread::Current()); 864 guard_.AssertExclusiveHeld(self); 865 #if ART_USE_FUTEXES 866 RequeueWaiters(1); 867 #else 868 CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_)); 869 #endif 870 } 871 872 void ConditionVariable::Wait(Thread* self) { 873 guard_.CheckSafeToWait(self); 874 WaitHoldingLocks(self); 875 } 876 877 void ConditionVariable::WaitHoldingLocks(Thread* self) { 878 DCHECK(self == nullptr || self == Thread::Current()); 879 guard_.AssertExclusiveHeld(self); 880 unsigned int old_recursion_count = guard_.recursion_count_; 881 #if ART_USE_FUTEXES 882 num_waiters_++; 883 // Ensure the Mutex is contended so that requeued threads are awoken. 884 guard_.increment_contenders(); 885 guard_.recursion_count_ = 1; 886 int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); 887 guard_.ExclusiveUnlock(self); 888 if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) { 889 // Futex failed, check it is an expected error. 890 // EAGAIN == EWOULDBLK, so we let the caller try again. 891 // EINTR implies a signal was sent to this thread. 892 if ((errno != EINTR) && (errno != EAGAIN)) { 893 PLOG(FATAL) << "futex wait failed for " << name_; 894 } 895 } 896 if (self != nullptr) { 897 JNIEnvExt* const env = self->GetJniEnv(); 898 if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) { 899 CHECK(self->IsDaemon()); 900 // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may 901 // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with 902 // --host and --gdb. 903 // After we wake up, the runtime may have been shutdown, which means that this condition may 904 // have been deleted. It is not safe to retry the wait. 905 SleepForever(); 906 } 907 } 908 guard_.ExclusiveLock(self); 909 CHECK_GT(num_waiters_, 0); 910 num_waiters_--; 911 // We awoke and so no longer require awakes from the guard_'s unlock. 912 CHECK_GT(guard_.get_contenders(), 0); 913 guard_.decrement_contenders(); 914 #else 915 pid_t old_owner = guard_.GetExclusiveOwnerTid(); 916 guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 917 guard_.recursion_count_ = 0; 918 CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_)); 919 guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); 920 #endif 921 guard_.recursion_count_ = old_recursion_count; 922 } 923 924 bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { 925 DCHECK(self == nullptr || self == Thread::Current()); 926 bool timed_out = false; 927 guard_.AssertExclusiveHeld(self); 928 guard_.CheckSafeToWait(self); 929 unsigned int old_recursion_count = guard_.recursion_count_; 930 #if ART_USE_FUTEXES 931 timespec rel_ts; 932 InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts); 933 num_waiters_++; 934 // Ensure the Mutex is contended so that requeued threads are awoken. 935 guard_.increment_contenders(); 936 guard_.recursion_count_ = 1; 937 int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); 938 guard_.ExclusiveUnlock(self); 939 if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) { 940 if (errno == ETIMEDOUT) { 941 // Timed out we're done. 942 timed_out = true; 943 } else if ((errno == EAGAIN) || (errno == EINTR)) { 944 // A signal or ConditionVariable::Signal/Broadcast has come in. 945 } else { 946 PLOG(FATAL) << "timed futex wait failed for " << name_; 947 } 948 } 949 guard_.ExclusiveLock(self); 950 CHECK_GT(num_waiters_, 0); 951 num_waiters_--; 952 // We awoke and so no longer require awakes from the guard_'s unlock. 953 CHECK_GT(guard_.get_contenders(), 0); 954 guard_.decrement_contenders(); 955 #else 956 #if !defined(__APPLE__) 957 int clock = CLOCK_MONOTONIC; 958 #else 959 int clock = CLOCK_REALTIME; 960 #endif 961 pid_t old_owner = guard_.GetExclusiveOwnerTid(); 962 guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); 963 guard_.recursion_count_ = 0; 964 timespec ts; 965 InitTimeSpec(true, clock, ms, ns, &ts); 966 int rc; 967 while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) { 968 continue; 969 } 970 971 if (rc == ETIMEDOUT) { 972 timed_out = true; 973 } else if (rc != 0) { 974 errno = rc; 975 PLOG(FATAL) << "TimedWait failed for " << name_; 976 } 977 guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); 978 #endif 979 guard_.recursion_count_ = old_recursion_count; 980 return timed_out; 981 } 982 983 } // namespace art 984