1 /* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "mutex.h" 18 19 #include <errno.h> 20 #include <sys/time.h> 21 22 #include "atomic.h" 23 #include "base/logging.h" 24 #include "mutex-inl.h" 25 #include "runtime.h" 26 #include "scoped_thread_state_change.h" 27 #include "thread-inl.h" 28 #include "utils.h" 29 30 namespace art { 31 32 Mutex* Locks::abort_lock_ = nullptr; 33 Mutex* Locks::alloc_tracker_lock_ = nullptr; 34 Mutex* Locks::allocated_monitor_ids_lock_ = nullptr; 35 Mutex* Locks::allocated_thread_ids_lock_ = nullptr; 36 ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr; 37 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr; 38 Mutex* Locks::deoptimization_lock_ = nullptr; 39 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr; 40 Mutex* Locks::instrument_entrypoints_lock_ = nullptr; 41 Mutex* Locks::intern_table_lock_ = nullptr; 42 Mutex* Locks::logging_lock_ = nullptr; 43 Mutex* Locks::mem_maps_lock_ = nullptr; 44 Mutex* Locks::modify_ldt_lock_ = nullptr; 45 ReaderWriterMutex* Locks::mutator_lock_ = nullptr; 46 Mutex* Locks::profiler_lock_ = nullptr; 47 Mutex* Locks::reference_processor_lock_ = nullptr; 48 Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr; 49 Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr; 50 Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr; 51 Mutex* Locks::reference_queue_soft_references_lock_ = nullptr; 52 Mutex* Locks::reference_queue_weak_references_lock_ = nullptr; 53 Mutex* Locks::runtime_shutdown_lock_ = nullptr; 54 Mutex* Locks::thread_list_lock_ = nullptr; 55 Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr; 56 Mutex* Locks::thread_suspend_count_lock_ = nullptr; 57 Mutex* Locks::trace_lock_ = nullptr; 58 Mutex* Locks::unexpected_signal_lock_ = nullptr; 59 60 struct AllMutexData { 61 // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). 62 Atomic<const BaseMutex*> all_mutexes_guard; 63 // All created mutexes guarded by all_mutexes_guard_. 64 std::set<BaseMutex*>* all_mutexes; 65 AllMutexData() : all_mutexes(NULL) {} 66 }; 67 static struct AllMutexData gAllMutexData[kAllMutexDataSize]; 68 69 #if ART_USE_FUTEXES 70 static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) { 71 const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds. 72 result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec; 73 result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec; 74 if (result_ts->tv_nsec < 0) { 75 result_ts->tv_sec--; 76 result_ts->tv_nsec += one_sec; 77 } else if (result_ts->tv_nsec > one_sec) { 78 result_ts->tv_sec++; 79 result_ts->tv_nsec -= one_sec; 80 } 81 return result_ts->tv_sec < 0; 82 } 83 #endif 84 85 class ScopedAllMutexesLock { 86 public: 87 explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) { 88 while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) { 89 NanoSleep(100); 90 } 91 } 92 ~ScopedAllMutexesLock() { 93 while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) { 94 NanoSleep(100); 95 } 96 } 97 private: 98 const BaseMutex* const mutex_; 99 }; 100 101 BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) { 102 if (kLogLockContentions) { 103 ScopedAllMutexesLock mu(this); 104 std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes; 105 if (*all_mutexes_ptr == NULL) { 106 // We leak the global set of all mutexes to avoid ordering issues in global variable 107 // construction/destruction. 108 *all_mutexes_ptr = new std::set<BaseMutex*>(); 109 } 110 (*all_mutexes_ptr)->insert(this); 111 } 112 } 113 114 BaseMutex::~BaseMutex() { 115 if (kLogLockContentions) { 116 ScopedAllMutexesLock mu(this); 117 gAllMutexData->all_mutexes->erase(this); 118 } 119 } 120 121 void BaseMutex::DumpAll(std::ostream& os) { 122 if (kLogLockContentions) { 123 os << "Mutex logging:\n"; 124 ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1)); 125 std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes; 126 if (all_mutexes == NULL) { 127 // No mutexes have been created yet during at startup. 128 return; 129 } 130 typedef std::set<BaseMutex*>::const_iterator It; 131 os << "(Contended)\n"; 132 for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) { 133 BaseMutex* mutex = *it; 134 if (mutex->HasEverContended()) { 135 mutex->Dump(os); 136 os << "\n"; 137 } 138 } 139 os << "(Never contented)\n"; 140 for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) { 141 BaseMutex* mutex = *it; 142 if (!mutex->HasEverContended()) { 143 mutex->Dump(os); 144 os << "\n"; 145 } 146 } 147 } 148 } 149 150 void BaseMutex::CheckSafeToWait(Thread* self) { 151 if (self == NULL) { 152 CheckUnattachedThread(level_); 153 return; 154 } 155 if (kDebugLocking) { 156 CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock) 157 << "Waiting on unacquired mutex: " << name_; 158 bool bad_mutexes_held = false; 159 for (int i = kLockLevelCount - 1; i >= 0; --i) { 160 if (i != level_) { 161 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i)); 162 // We expect waits to happen while holding the thread list suspend thread lock. 163 if (held_mutex != NULL && i != kThreadListSuspendThreadLock) { 164 LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" " 165 << "(level " << LockLevel(i) << ") while performing wait on " 166 << "\"" << name_ << "\" (level " << level_ << ")"; 167 bad_mutexes_held = true; 168 } 169 } 170 } 171 CHECK(!bad_mutexes_held); 172 } 173 } 174 175 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) { 176 if (kLogLockContentions) { 177 // Atomically add value to wait_time. 178 wait_time.FetchAndAddSequentiallyConsistent(value); 179 } 180 } 181 182 void BaseMutex::RecordContention(uint64_t blocked_tid, 183 uint64_t owner_tid, 184 uint64_t nano_time_blocked) { 185 if (kLogLockContentions) { 186 ContentionLogData* data = contention_log_data_; 187 ++(data->contention_count); 188 data->AddToWaitTime(nano_time_blocked); 189 ContentionLogEntry* log = data->contention_log; 190 // This code is intentionally racy as it is only used for diagnostics. 191 uint32_t slot = data->cur_content_log_entry.LoadRelaxed(); 192 if (log[slot].blocked_tid == blocked_tid && 193 log[slot].owner_tid == blocked_tid) { 194 ++log[slot].count; 195 } else { 196 uint32_t new_slot; 197 do { 198 slot = data->cur_content_log_entry.LoadRelaxed(); 199 new_slot = (slot + 1) % kContentionLogSize; 200 } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot)); 201 log[new_slot].blocked_tid = blocked_tid; 202 log[new_slot].owner_tid = owner_tid; 203 log[new_slot].count.StoreRelaxed(1); 204 } 205 } 206 } 207 208 void BaseMutex::DumpContention(std::ostream& os) const { 209 if (kLogLockContentions) { 210 const ContentionLogData* data = contention_log_data_; 211 const ContentionLogEntry* log = data->contention_log; 212 uint64_t wait_time = data->wait_time.LoadRelaxed(); 213 uint32_t contention_count = data->contention_count.LoadRelaxed(); 214 if (contention_count == 0) { 215 os << "never contended"; 216 } else { 217 os << "contended " << contention_count 218 << " total wait of contender " << PrettyDuration(wait_time) 219 << " average " << PrettyDuration(wait_time / contention_count); 220 SafeMap<uint64_t, size_t> most_common_blocker; 221 SafeMap<uint64_t, size_t> most_common_blocked; 222 for (size_t i = 0; i < kContentionLogSize; ++i) { 223 uint64_t blocked_tid = log[i].blocked_tid; 224 uint64_t owner_tid = log[i].owner_tid; 225 uint32_t count = log[i].count.LoadRelaxed(); 226 if (count > 0) { 227 auto it = most_common_blocked.find(blocked_tid); 228 if (it != most_common_blocked.end()) { 229 most_common_blocked.Overwrite(blocked_tid, it->second + count); 230 } else { 231 most_common_blocked.Put(blocked_tid, count); 232 } 233 it = most_common_blocker.find(owner_tid); 234 if (it != most_common_blocker.end()) { 235 most_common_blocker.Overwrite(owner_tid, it->second + count); 236 } else { 237 most_common_blocker.Put(owner_tid, count); 238 } 239 } 240 } 241 uint64_t max_tid = 0; 242 size_t max_tid_count = 0; 243 for (const auto& pair : most_common_blocked) { 244 if (pair.second > max_tid_count) { 245 max_tid = pair.first; 246 max_tid_count = pair.second; 247 } 248 } 249 if (max_tid != 0) { 250 os << " sample shows most blocked tid=" << max_tid; 251 } 252 max_tid = 0; 253 max_tid_count = 0; 254 for (const auto& pair : most_common_blocker) { 255 if (pair.second > max_tid_count) { 256 max_tid = pair.first; 257 max_tid_count = pair.second; 258 } 259 } 260 if (max_tid != 0) { 261 os << " sample shows tid=" << max_tid << " owning during this time"; 262 } 263 } 264 } 265 } 266 267 268 Mutex::Mutex(const char* name, LockLevel level, bool recursive) 269 : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) { 270 #if ART_USE_FUTEXES 271 DCHECK_EQ(0, state_.LoadRelaxed()); 272 DCHECK_EQ(0, num_contenders_.LoadRelaxed()); 273 #else 274 CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr)); 275 #endif 276 exclusive_owner_ = 0; 277 } 278 279 // Helper to ignore the lock requirement. 280 static bool IsShuttingDown() NO_THREAD_SAFETY_ANALYSIS { 281 Runtime* runtime = Runtime::Current(); 282 return runtime == nullptr || runtime->IsShuttingDownLocked(); 283 } 284 285 Mutex::~Mutex() { 286 bool shutting_down = IsShuttingDown(); 287 #if ART_USE_FUTEXES 288 if (state_.LoadRelaxed() != 0) { 289 LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " 290 << exclusive_owner_; 291 } else { 292 if (exclusive_owner_ != 0) { 293 LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found an owner on unlocked mutex " 294 << name_; 295 } 296 if (num_contenders_.LoadSequentiallyConsistent() != 0) { 297 LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found a contender on mutex " << name_; 298 } 299 } 300 #else 301 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 302 // may still be using locks. 303 int rc = pthread_mutex_destroy(&mutex_); 304 if (rc != 0) { 305 errno = rc; 306 // TODO: should we just not log at all if shutting down? this could be the logging mutex! 307 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_); 308 PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_; 309 } 310 #endif 311 } 312 313 void Mutex::ExclusiveLock(Thread* self) { 314 DCHECK(self == NULL || self == Thread::Current()); 315 if (kDebugLocking && !recursive_) { 316 AssertNotHeld(self); 317 } 318 if (!recursive_ || !IsExclusiveHeld(self)) { 319 #if ART_USE_FUTEXES 320 bool done = false; 321 do { 322 int32_t cur_state = state_.LoadRelaxed(); 323 if (LIKELY(cur_state == 0)) { 324 // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition. 325 done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */); 326 } else { 327 // Failed to acquire, hang up. 328 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 329 num_contenders_++; 330 if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) { 331 // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. 332 // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. 333 if ((errno != EAGAIN) && (errno != EINTR)) { 334 PLOG(FATAL) << "futex wait failed for " << name_; 335 } 336 } 337 num_contenders_--; 338 } 339 } while (!done); 340 DCHECK_EQ(state_.LoadRelaxed(), 1); 341 #else 342 CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_)); 343 #endif 344 DCHECK_EQ(exclusive_owner_, 0U); 345 exclusive_owner_ = SafeGetTid(self); 346 RegisterAsLocked(self); 347 } 348 recursion_count_++; 349 if (kDebugLocking) { 350 CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: " 351 << name_ << " " << recursion_count_; 352 AssertHeld(self); 353 } 354 } 355 356 bool Mutex::ExclusiveTryLock(Thread* self) { 357 DCHECK(self == NULL || self == Thread::Current()); 358 if (kDebugLocking && !recursive_) { 359 AssertNotHeld(self); 360 } 361 if (!recursive_ || !IsExclusiveHeld(self)) { 362 #if ART_USE_FUTEXES 363 bool done = false; 364 do { 365 int32_t cur_state = state_.LoadRelaxed(); 366 if (cur_state == 0) { 367 // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition. 368 done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */); 369 } else { 370 return false; 371 } 372 } while (!done); 373 DCHECK_EQ(state_.LoadRelaxed(), 1); 374 #else 375 int result = pthread_mutex_trylock(&mutex_); 376 if (result == EBUSY) { 377 return false; 378 } 379 if (result != 0) { 380 errno = result; 381 PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; 382 } 383 #endif 384 DCHECK_EQ(exclusive_owner_, 0U); 385 exclusive_owner_ = SafeGetTid(self); 386 RegisterAsLocked(self); 387 } 388 recursion_count_++; 389 if (kDebugLocking) { 390 CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: " 391 << name_ << " " << recursion_count_; 392 AssertHeld(self); 393 } 394 return true; 395 } 396 397 void Mutex::ExclusiveUnlock(Thread* self) { 398 DCHECK(self == NULL || self == Thread::Current()); 399 AssertHeld(self); 400 DCHECK_NE(exclusive_owner_, 0U); 401 recursion_count_--; 402 if (!recursive_ || recursion_count_ == 0) { 403 if (kDebugLocking) { 404 CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: " 405 << name_ << " " << recursion_count_; 406 } 407 RegisterAsUnlocked(self); 408 #if ART_USE_FUTEXES 409 bool done = false; 410 do { 411 int32_t cur_state = state_.LoadRelaxed(); 412 if (LIKELY(cur_state == 1)) { 413 // We're no longer the owner. 414 exclusive_owner_ = 0; 415 // Change state to 0 and impose load/store ordering appropriate for lock release. 416 // Note, the relaxed loads below musn't reorder before the CompareExchange. 417 // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing 418 // a status bit into the state on contention. 419 done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */); 420 if (LIKELY(done)) { // Spurious fail? 421 // Wake a contender. 422 if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) { 423 futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0); 424 } 425 } 426 } else { 427 // Logging acquires the logging lock, avoid infinite recursion in that case. 428 if (this != Locks::logging_lock_) { 429 LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_; 430 } else { 431 LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1); 432 LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s", 433 cur_state, name_).c_str()); 434 _exit(1); 435 } 436 } 437 } while (!done); 438 #else 439 exclusive_owner_ = 0; 440 CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); 441 #endif 442 } 443 } 444 445 void Mutex::Dump(std::ostream& os) const { 446 os << (recursive_ ? "recursive " : "non-recursive ") 447 << name_ 448 << " level=" << static_cast<int>(level_) 449 << " rec=" << recursion_count_ 450 << " owner=" << GetExclusiveOwnerTid() << " "; 451 DumpContention(os); 452 } 453 454 std::ostream& operator<<(std::ostream& os, const Mutex& mu) { 455 mu.Dump(os); 456 return os; 457 } 458 459 ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) 460 : BaseMutex(name, level) 461 #if ART_USE_FUTEXES 462 , state_(0), num_pending_readers_(0), num_pending_writers_(0) 463 #endif 464 { // NOLINT(whitespace/braces) 465 #if !ART_USE_FUTEXES 466 CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr)); 467 #endif 468 exclusive_owner_ = 0; 469 } 470 471 ReaderWriterMutex::~ReaderWriterMutex() { 472 #if ART_USE_FUTEXES 473 CHECK_EQ(state_.LoadRelaxed(), 0); 474 CHECK_EQ(exclusive_owner_, 0U); 475 CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0); 476 CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0); 477 #else 478 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 479 // may still be using locks. 480 int rc = pthread_rwlock_destroy(&rwlock_); 481 if (rc != 0) { 482 errno = rc; 483 // TODO: should we just not log at all if shutting down? this could be the logging mutex! 484 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_); 485 Runtime* runtime = Runtime::Current(); 486 bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked(); 487 PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_; 488 } 489 #endif 490 } 491 492 void ReaderWriterMutex::ExclusiveLock(Thread* self) { 493 DCHECK(self == NULL || self == Thread::Current()); 494 AssertNotExclusiveHeld(self); 495 #if ART_USE_FUTEXES 496 bool done = false; 497 do { 498 int32_t cur_state = state_.LoadRelaxed(); 499 if (LIKELY(cur_state == 0)) { 500 // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. 501 done = state_.CompareExchangeWeakAcquire(0 /* cur_state*/, -1 /* new state */); 502 } else { 503 // Failed to acquire, hang up. 504 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 505 ++num_pending_writers_; 506 if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) { 507 // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. 508 // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. 509 if ((errno != EAGAIN) && (errno != EINTR)) { 510 PLOG(FATAL) << "futex wait failed for " << name_; 511 } 512 } 513 --num_pending_writers_; 514 } 515 } while (!done); 516 DCHECK_EQ(state_.LoadRelaxed(), -1); 517 #else 518 CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); 519 #endif 520 DCHECK_EQ(exclusive_owner_, 0U); 521 exclusive_owner_ = SafeGetTid(self); 522 RegisterAsLocked(self); 523 AssertExclusiveHeld(self); 524 } 525 526 void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { 527 DCHECK(self == NULL || self == Thread::Current()); 528 AssertExclusiveHeld(self); 529 RegisterAsUnlocked(self); 530 DCHECK_NE(exclusive_owner_, 0U); 531 #if ART_USE_FUTEXES 532 bool done = false; 533 do { 534 int32_t cur_state = state_.LoadRelaxed(); 535 if (LIKELY(cur_state == -1)) { 536 // We're no longer the owner. 537 exclusive_owner_ = 0; 538 // Change state from -1 to 0 and impose load/store ordering appropriate for lock release. 539 // Note, the relaxed loads below musn't reorder before the CompareExchange. 540 // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing 541 // a status bit into the state on contention. 542 done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */); 543 if (LIKELY(done)) { // Weak CAS may fail spuriously. 544 // Wake any waiters. 545 if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 || 546 num_pending_writers_.LoadRelaxed() > 0)) { 547 futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0); 548 } 549 } 550 } else { 551 LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; 552 } 553 } while (!done); 554 #else 555 exclusive_owner_ = 0; 556 CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); 557 #endif 558 } 559 560 #if HAVE_TIMED_RWLOCK 561 bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) { 562 DCHECK(self == NULL || self == Thread::Current()); 563 #if ART_USE_FUTEXES 564 bool done = false; 565 timespec end_abs_ts; 566 InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts); 567 do { 568 int32_t cur_state = state_.LoadRelaxed(); 569 if (cur_state == 0) { 570 // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. 571 done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */); 572 } else { 573 // Failed to acquire, hang up. 574 timespec now_abs_ts; 575 InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts); 576 timespec rel_ts; 577 if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) { 578 return false; // Timed out. 579 } 580 ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); 581 ++num_pending_writers_; 582 if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) { 583 if (errno == ETIMEDOUT) { 584 --num_pending_writers_; 585 return false; // Timed out. 586 } else if ((errno != EAGAIN) && (errno != EINTR)) { 587 // EAGAIN and EINTR both indicate a spurious failure, 588 // recompute the relative time out from now and try again. 589 // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts; 590 PLOG(FATAL) << "timed futex wait failed for " << name_; 591 } 592 } 593 --num_pending_writers_; 594 } 595 } while (!done); 596 #else 597 timespec ts; 598 InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts); 599 int result = pthread_rwlock_timedwrlock(&rwlock_, &ts); 600 if (result == ETIMEDOUT) { 601 return false; 602 } 603 if (result != 0) { 604 errno = result; 605 PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; 606 } 607 #endif 608 exclusive_owner_ = SafeGetTid(self); 609 RegisterAsLocked(self); 610 AssertSharedHeld(self); 611 return true; 612 } 613 #endif 614 615 bool ReaderWriterMutex::SharedTryLock(Thread* self) { 616 DCHECK(self == NULL || self == Thread::Current()); 617 #if ART_USE_FUTEXES 618 bool done = false; 619 do { 620 int32_t cur_state = state_.LoadRelaxed(); 621 if (cur_state >= 0) { 622 // Add as an extra reader and impose load/store ordering appropriate for lock acquisition. 623 done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1); 624 } else { 625 // Owner holds it exclusively. 626 return false; 627 } 628 } while (!done); 629 #else 630 int result = pthread_rwlock_tryrdlock(&rwlock_); 631 if (result == EBUSY) { 632 return false; 633 } 634 if (result != 0) { 635 errno = result; 636 PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; 637 } 638 #endif 639 RegisterAsLocked(self); 640 AssertSharedHeld(self); 641 return true; 642 } 643 644 bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const { 645 DCHECK(self == NULL || self == Thread::Current()); 646 bool result; 647 if (UNLIKELY(self == NULL)) { // Handle unattached threads. 648 result = IsExclusiveHeld(self); // TODO: a better best effort here. 649 } else { 650 result = (self->GetHeldMutex(level_) == this); 651 } 652 return result; 653 } 654 655 void ReaderWriterMutex::Dump(std::ostream& os) const { 656 os << name_ 657 << " level=" << static_cast<int>(level_) 658 << " owner=" << GetExclusiveOwnerTid() 659 #if ART_USE_FUTEXES 660 << " state=" << state_.LoadSequentiallyConsistent() 661 << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent() 662 << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent() 663 #endif 664 << " "; 665 DumpContention(os); 666 } 667 668 std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) { 669 mu.Dump(os); 670 return os; 671 } 672 673 ConditionVariable::ConditionVariable(const char* name, Mutex& guard) 674 : name_(name), guard_(guard) { 675 #if ART_USE_FUTEXES 676 DCHECK_EQ(0, sequence_.LoadRelaxed()); 677 num_waiters_ = 0; 678 #else 679 pthread_condattr_t cond_attrs; 680 CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs)); 681 #if !defined(__APPLE__) 682 // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock. 683 CHECK_MUTEX_CALL(pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC)); 684 #endif 685 CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs)); 686 #endif 687 } 688 689 ConditionVariable::~ConditionVariable() { 690 #if ART_USE_FUTEXES 691 if (num_waiters_!= 0) { 692 Runtime* runtime = Runtime::Current(); 693 bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current()); 694 LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_ 695 << " called with " << num_waiters_ << " waiters."; 696 } 697 #else 698 // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread 699 // may still be using condition variables. 700 int rc = pthread_cond_destroy(&cond_); 701 if (rc != 0) { 702 errno = rc; 703 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_); 704 Runtime* runtime = Runtime::Current(); 705 bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked(); 706 PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_; 707 } 708 #endif 709 } 710 711 void ConditionVariable::Broadcast(Thread* self) { 712 DCHECK(self == NULL || self == Thread::Current()); 713 // TODO: enable below, there's a race in thread creation that causes false failures currently. 714 // guard_.AssertExclusiveHeld(self); 715 DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self)); 716 #if ART_USE_FUTEXES 717 if (num_waiters_ > 0) { 718 sequence_++; // Indicate the broadcast occurred. 719 bool done = false; 720 do { 721 int32_t cur_sequence = sequence_.LoadRelaxed(); 722 // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high ensuring 723 // mutex unlocks will awaken the requeued waiter thread. 724 done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0, 725 reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()), 726 guard_.state_.Address(), cur_sequence) != -1; 727 if (!done) { 728 if (errno != EAGAIN) { 729 PLOG(FATAL) << "futex cmp requeue failed for " << name_; 730 } 731 } 732 } while (!done); 733 } 734 #else 735 CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_)); 736 #endif 737 } 738 739 void ConditionVariable::Signal(Thread* self) { 740 DCHECK(self == NULL || self == Thread::Current()); 741 guard_.AssertExclusiveHeld(self); 742 #if ART_USE_FUTEXES 743 if (num_waiters_ > 0) { 744 sequence_++; // Indicate a signal occurred. 745 // Futex wake 1 waiter who will then come and in contend on mutex. It'd be nice to requeue them 746 // to avoid this, however, requeueing can only move all waiters. 747 int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0); 748 // Check something was woken or else we changed sequence_ before they had chance to wait. 749 CHECK((num_woken == 0) || (num_woken == 1)); 750 } 751 #else 752 CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_)); 753 #endif 754 } 755 756 void ConditionVariable::Wait(Thread* self) { 757 guard_.CheckSafeToWait(self); 758 WaitHoldingLocks(self); 759 } 760 761 void ConditionVariable::WaitHoldingLocks(Thread* self) { 762 DCHECK(self == NULL || self == Thread::Current()); 763 guard_.AssertExclusiveHeld(self); 764 unsigned int old_recursion_count = guard_.recursion_count_; 765 #if ART_USE_FUTEXES 766 num_waiters_++; 767 // Ensure the Mutex is contended so that requeued threads are awoken. 768 guard_.num_contenders_++; 769 guard_.recursion_count_ = 1; 770 int32_t cur_sequence = sequence_.LoadRelaxed(); 771 guard_.ExclusiveUnlock(self); 772 if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) { 773 // Futex failed, check it is an expected error. 774 // EAGAIN == EWOULDBLK, so we let the caller try again. 775 // EINTR implies a signal was sent to this thread. 776 if ((errno != EINTR) && (errno != EAGAIN)) { 777 PLOG(FATAL) << "futex wait failed for " << name_; 778 } 779 } 780 guard_.ExclusiveLock(self); 781 CHECK_GE(num_waiters_, 0); 782 num_waiters_--; 783 // We awoke and so no longer require awakes from the guard_'s unlock. 784 CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0); 785 guard_.num_contenders_--; 786 #else 787 uint64_t old_owner = guard_.exclusive_owner_; 788 guard_.exclusive_owner_ = 0; 789 guard_.recursion_count_ = 0; 790 CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_)); 791 guard_.exclusive_owner_ = old_owner; 792 #endif 793 guard_.recursion_count_ = old_recursion_count; 794 } 795 796 void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { 797 DCHECK(self == NULL || self == Thread::Current()); 798 guard_.AssertExclusiveHeld(self); 799 guard_.CheckSafeToWait(self); 800 unsigned int old_recursion_count = guard_.recursion_count_; 801 #if ART_USE_FUTEXES 802 timespec rel_ts; 803 InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts); 804 num_waiters_++; 805 // Ensure the Mutex is contended so that requeued threads are awoken. 806 guard_.num_contenders_++; 807 guard_.recursion_count_ = 1; 808 int32_t cur_sequence = sequence_.LoadRelaxed(); 809 guard_.ExclusiveUnlock(self); 810 if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) { 811 if (errno == ETIMEDOUT) { 812 // Timed out we're done. 813 } else if ((errno == EAGAIN) || (errno == EINTR)) { 814 // A signal or ConditionVariable::Signal/Broadcast has come in. 815 } else { 816 PLOG(FATAL) << "timed futex wait failed for " << name_; 817 } 818 } 819 guard_.ExclusiveLock(self); 820 CHECK_GE(num_waiters_, 0); 821 num_waiters_--; 822 // We awoke and so no longer require awakes from the guard_'s unlock. 823 CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0); 824 guard_.num_contenders_--; 825 #else 826 #if !defined(__APPLE__) 827 int clock = CLOCK_MONOTONIC; 828 #else 829 int clock = CLOCK_REALTIME; 830 #endif 831 uint64_t old_owner = guard_.exclusive_owner_; 832 guard_.exclusive_owner_ = 0; 833 guard_.recursion_count_ = 0; 834 timespec ts; 835 InitTimeSpec(true, clock, ms, ns, &ts); 836 int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)); 837 if (rc != 0 && rc != ETIMEDOUT) { 838 errno = rc; 839 PLOG(FATAL) << "TimedWait failed for " << name_; 840 } 841 guard_.exclusive_owner_ = old_owner; 842 #endif 843 guard_.recursion_count_ = old_recursion_count; 844 } 845 846 void Locks::Init() { 847 if (logging_lock_ != nullptr) { 848 // Already initialized. 849 if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) { 850 DCHECK(modify_ldt_lock_ != nullptr); 851 } else { 852 DCHECK(modify_ldt_lock_ == nullptr); 853 } 854 DCHECK(abort_lock_ != nullptr); 855 DCHECK(alloc_tracker_lock_ != nullptr); 856 DCHECK(allocated_monitor_ids_lock_ != nullptr); 857 DCHECK(allocated_thread_ids_lock_ != nullptr); 858 DCHECK(breakpoint_lock_ != nullptr); 859 DCHECK(classlinker_classes_lock_ != nullptr); 860 DCHECK(deoptimization_lock_ != nullptr); 861 DCHECK(heap_bitmap_lock_ != nullptr); 862 DCHECK(intern_table_lock_ != nullptr); 863 DCHECK(logging_lock_ != nullptr); 864 DCHECK(mutator_lock_ != nullptr); 865 DCHECK(profiler_lock_ != nullptr); 866 DCHECK(thread_list_lock_ != nullptr); 867 DCHECK(thread_list_suspend_thread_lock_ != nullptr); 868 DCHECK(thread_suspend_count_lock_ != nullptr); 869 DCHECK(trace_lock_ != nullptr); 870 DCHECK(unexpected_signal_lock_ != nullptr); 871 } else { 872 // Create global locks in level order from highest lock level to lowest. 873 LockLevel current_lock_level = kThreadListSuspendThreadLock; 874 DCHECK(thread_list_suspend_thread_lock_ == nullptr); 875 thread_list_suspend_thread_lock_ = 876 new Mutex("thread list suspend thread by .. lock", current_lock_level); 877 878 #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \ 879 if (new_level >= current_lock_level) { \ 880 /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \ 881 fprintf(stderr, "New local level %d is not less than current level %d\n", \ 882 new_level, current_lock_level); \ 883 exit(1); \ 884 } \ 885 current_lock_level = new_level; 886 887 UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock); 888 DCHECK(instrument_entrypoints_lock_ == nullptr); 889 instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level); 890 891 UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock); 892 DCHECK(mutator_lock_ == nullptr); 893 mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level); 894 895 UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock); 896 DCHECK(heap_bitmap_lock_ == nullptr); 897 heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level); 898 899 UPDATE_CURRENT_LOCK_LEVEL(kTraceLock); 900 DCHECK(trace_lock_ == nullptr); 901 trace_lock_ = new Mutex("trace lock", current_lock_level); 902 903 UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock); 904 DCHECK(runtime_shutdown_lock_ == nullptr); 905 runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level); 906 907 UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock); 908 DCHECK(profiler_lock_ == nullptr); 909 profiler_lock_ = new Mutex("profiler lock", current_lock_level); 910 911 UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock); 912 DCHECK(deoptimization_lock_ == nullptr); 913 deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level); 914 915 UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock); 916 DCHECK(alloc_tracker_lock_ == nullptr); 917 alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level); 918 919 UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock); 920 DCHECK(thread_list_lock_ == nullptr); 921 thread_list_lock_ = new Mutex("thread list lock", current_lock_level); 922 923 UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock); 924 DCHECK(breakpoint_lock_ == nullptr); 925 breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level); 926 927 UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock); 928 DCHECK(classlinker_classes_lock_ == nullptr); 929 classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock", 930 current_lock_level); 931 932 UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock); 933 DCHECK(allocated_monitor_ids_lock_ == nullptr); 934 allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level); 935 936 UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock); 937 DCHECK(allocated_thread_ids_lock_ == nullptr); 938 allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level); 939 940 if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) { 941 UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock); 942 DCHECK(modify_ldt_lock_ == nullptr); 943 modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level); 944 } 945 946 UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock); 947 DCHECK(intern_table_lock_ == nullptr); 948 intern_table_lock_ = new Mutex("InternTable lock", current_lock_level); 949 950 UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock); 951 DCHECK(reference_processor_lock_ == nullptr); 952 reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level); 953 954 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock); 955 DCHECK(reference_queue_cleared_references_lock_ == nullptr); 956 reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level); 957 958 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock); 959 DCHECK(reference_queue_weak_references_lock_ == nullptr); 960 reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level); 961 962 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock); 963 DCHECK(reference_queue_finalizer_references_lock_ == nullptr); 964 reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level); 965 966 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock); 967 DCHECK(reference_queue_phantom_references_lock_ == nullptr); 968 reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level); 969 970 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock); 971 DCHECK(reference_queue_soft_references_lock_ == nullptr); 972 reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level); 973 974 UPDATE_CURRENT_LOCK_LEVEL(kAbortLock); 975 DCHECK(abort_lock_ == nullptr); 976 abort_lock_ = new Mutex("abort lock", current_lock_level, true); 977 978 UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock); 979 DCHECK(thread_suspend_count_lock_ == nullptr); 980 thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level); 981 982 UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock); 983 DCHECK(unexpected_signal_lock_ == nullptr); 984 unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true); 985 986 UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock); 987 DCHECK(mem_maps_lock_ == nullptr); 988 mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level); 989 990 UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock); 991 DCHECK(logging_lock_ == nullptr); 992 logging_lock_ = new Mutex("logging lock", current_lock_level, true); 993 994 #undef UPDATE_CURRENT_LOCK_LEVEL 995 } 996 } 997 998 999 } // namespace art 1000