/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include <limits>
#include <set>

#include "atomic.h"
#include "base/logging.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;

struct AllMutexData {
  // A guard for all_mutexes that isn't a Mutex itself (it is acquired by CAS and busy-waiting).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];

#if ART_USE_FUTEXES
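// Computes result_ts = lhs - rhs, normalizing tv_nsec into [0, one second). Returns true if the
// difference is negative, i.e. lhs is earlier than rhs (used below to detect an already-expired
// deadline before issuing a timed futex wait).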
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

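// Scoped guard for gAllMutexData->all_mutexes. It is a raw CAS spin lock (with a short sleep
// between attempts) rather than a Mutex, because Mutex construction and destruction themselves
// need to take this guard.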
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet at startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

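// Sanity check performed before blocking in a wait: the waiting thread must actually hold this
// lock (or be waiting on a monitor), and must not hold any other runtime lock apart from the
// thread list suspend thread lock, since waiting while holding one could deadlock the runtime.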
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We expect waits to happen while holding the thread list suspend thread lock.
        if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.FetchAndAddSequentiallyConsistent(value);
  }
}

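// Records a single contention event: bumps the aggregate counters and stores the
// (blocked_tid, owner_tid) pair in a small ring buffer of kContentionLogSize entries. The
// bookkeeping is deliberately racy; it only feeds the diagnostic output of DumpContention().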
void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contention_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry.LoadRelaxed();
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count.StoreRelaxed(1);
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.LoadRelaxed();
    uint32_t contention_count = data->contention_count.LoadRelaxed();
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.LoadRelaxed();
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


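// Futex-based Mutex implementation notes (when ART_USE_FUTEXES is set): state_ is 0 when the
// lock is free and 1 when it is held, num_contenders_ counts threads blocked in futex waits,
// and exclusive_owner_ caches the holder's tid for debug checks and recursive locking.
//
// Typical use goes through the scoped helpers rather than these methods directly, e.g.:
//   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
//   ... code needing the thread list lock; released when 'mu' goes out of scope ...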
Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.LoadRelaxed());
  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
  exclusive_owner_ = 0;
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_.LoadRelaxed() != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
        << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.LoadRelaxed(), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    DCHECK_EQ(exclusive_owner_, 0U);
    exclusive_owner_ = SafeGetTid(self);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  DCHECK_NE(exclusive_owner_, 0U);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.LoadRelaxed();
      if (LIKELY(cur_state == 1)) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0 and impose load/store ordering appropriate for lock release.
        // Note, the relaxed loads below mustn't reorder before the CompareExchange.
        // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
        // a status bit into the state on contention.
        done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, 0 /* new state */);
        if (LIKELY(done)) {  // Spurious fail?
          // Wake a contender.
          if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
            futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    exclusive_owner_ = 0;
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

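// Futex-based ReaderWriterMutex implementation notes: state_ is 0 when unlocked, -1 when held
// exclusively by a writer, and otherwise counts the number of shared (reader) holders.
// num_pending_readers_ and num_pending_writers_ track threads blocked in futex waits so that
// the unlock paths know whether a FUTEX_WAKE is needed.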
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
#endif
  exclusive_owner_ = 0;
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.LoadRelaxed(), 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0);
  CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == 0)) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
  DCHECK_EQ(state_.LoadRelaxed(), -1);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  DCHECK_EQ(exclusive_owner_, 0U);
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
  DCHECK_NE(exclusive_owner_, 0U);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state == -1)) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
      // Note, the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(-1 /* cur_state */, 0 /* new state */);
      if (LIKELY(done)) {  // Weak CAS may fail spuriously.
        // Wake any waiters.
        if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
                     num_pending_writers_.LoadRelaxed() > 0)) {
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  exclusive_owner_ = 0;
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state == 0) {
      // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(0 /* cur_state */, -1 /* new state */);
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      ++num_pending_writers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          --num_pending_writers_;
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      --num_pending_writers_;
    }
  } while (!done);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  exclusive_owner_ = SafeGetTid(self);
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (cur_state >= 0) {
      // Add as an extra reader and impose load/store ordering appropriate for lock acquisition.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

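// Futex-based ConditionVariable implementation notes: sequence_ is bumped on every Signal and
// Broadcast, and waiters block with FUTEX_WAIT on the sequence value they observed while still
// holding guard_. Broadcast requeues blocked waiters onto the guard mutex's futex
// (FUTEX_CMP_REQUEUE) so they are woken one at a time as the mutex is unlocked, while Signal
// wakes a single waiter that then re-contends on guard_.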
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, sequence_.LoadRelaxed());
  num_waiters_ = 0;
#else
  pthread_condattr_t cond_attrs;
  CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
  // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
  CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_.LoadRelaxed();
      // Requeue waiters onto the mutex. Each waiter holds the mutex's contender count high,
      // ensuring that unlocks of the mutex will awaken the requeued waiter threads.
      done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   guard_.state_.Address(), cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    sequence_++;  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to
    // requeue them to avoid this; however, requeueing can only move all waiters.
    int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check something was woken or else we changed sequence_ before they had a chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_.LoadRelaxed();
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out; we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
  guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
  int clock = CLOCK_MONOTONIC;
#else
  int clock = CLOCK_REALTIME;
#endif
  uint64_t old_owner = guard_.exclusive_owner_;
  guard_.exclusive_owner_ = 0;
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
  guard_.exclusive_owner_ = old_owner;
#endif
  guard_.recursion_count_ = old_recursion_count;
}

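// Creates the global runtime locks in strictly decreasing LockLevel order (highest level first),
// so that the lock hierarchy checked under kDebugLocking matches creation order; on a second call
// it only verifies that the locks already exist. Because levels must strictly decrease along any
// acquisition chain, code taking several of these locks must take the higher-level lock first,
// e.g. thread_list_lock_ before thread_suspend_count_lock_.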
void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_list_suspend_thread_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kThreadListSuspendThreadLock;
    DCHECK(thread_list_suspend_thread_lock_ == nullptr);
    thread_list_suspend_thread_lock_ =
        new Mutex("thread list suspend thread by .. lock", current_lock_level);

    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if (new_level >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == kX86 || kRuntimeISA == kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
    DCHECK(mem_maps_lock_ == nullptr);
    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL
  }
}


}  // namespace art