/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"

#include <errno.h>
#include <sys/time.h>

#include "atomic.h"
#include "base/logging.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {

#if defined(__APPLE__)

// This works on Mac OS 10.6 but hasn't been tested on older releases.
struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  int padding1;
  uint32_t padding2;
  int16_t padding3;
  int16_t padding4;
  uint32_t padding5;
  pthread_t darwin_pthread_mutex_owner;
  // ...other stuff we don't care about.
};

struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  pthread_mutex_t padding1;
  int padding2;
  pthread_cond_t padding3;
  pthread_cond_t padding4;
  int padding5;
  int padding6;
  pthread_t darwin_pthread_rwlock_owner;
  // ...other stuff we don't care about.
};

#endif  // __APPLE__

#if defined(__GLIBC__)

struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
  int32_t padding0[2];
  int owner;
  // ...other stuff we don't care about.
};

struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
#ifdef __LP64__
  int32_t padding0[6];
#else
  int32_t padding0[7];
#endif
  int writer;
  // ...other stuff we don't care about.
};

#endif  // __GLIBC__

#if ART_USE_FUTEXES
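// Computes *result_ts = lhs - rhs, normalizing tv_nsec into [0, 1s), and returns true if the
// result is negative (i.e. the deadline lhs has already passed rhs). A worked example
// (illustrative only): lhs = {5, 100000000} and rhs = {3, 900000000} give a raw difference of
// {2, -800000000}, which normalizes to {1, 200000000}, i.e. 1.2s, so the function returns false.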
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  const int32_t one_sec = 1000 * 1000 * 1000;  // one second in nanoseconds.
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    result_ts->tv_sec--;
    result_ts->tv_nsec += one_sec;
  } else if (result_ts->tv_nsec > one_sec) {
    result_ts->tv_sec++;
    result_ts->tv_nsec -= one_sec;
  }
  return result_ts->tv_sec < 0;
}
#endif

struct AllMutexData {
  // A guard for all_mutexes that is deliberately not a Mutex (it is acquired by CAS and
  // busy-waiting, so it works while Mutexes themselves are being created and destroyed).
  AtomicInteger all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(NULL) {}
};
static struct AllMutexData all_mutex_data[kAllMutexDataSize];

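// Scoped guard for the all_mutexes registry; the constructor spins until it installs this
// mutex's address into all_mutexes_guard via CAS, and the destructor spins to clear it. A usage
// sketch (mirroring BaseMutex's constructor below):
//   {
//     ScopedAllMutexesLock mu(this);
//     all_mutex_data->all_mutexes->insert(this);
//   }  // Guard released here.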
class ScopedAllMutexesLock {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    while (!all_mutex_data->all_mutexes_guard.compare_and_swap(0, reinterpret_cast<int32_t>(mutex))) {
      NanoSleep(100);
    }
  }
  ~ScopedAllMutexesLock() {
    while (!all_mutex_data->all_mutexes_guard.compare_and_swap(reinterpret_cast<int32_t>(mutex_), 0)) {
      NanoSleep(100);
    }
  }
 private:
  const BaseMutex* const mutex_;
};

BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &all_mutex_data->all_mutexes;
    if (*all_mutexes_ptr == NULL) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}

BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    all_mutex_data->all_mutexes->erase(this);
  }
}

void BaseMutex::DumpAll(std::ostream& os) {
  if (kLogLockContentions) {
    os << "Mutex logging:\n";
    ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
    std::set<BaseMutex*>* all_mutexes = all_mutex_data->all_mutexes;
    if (all_mutexes == NULL) {
      // No mutexes have been created yet during startup.
      return;
    }
    typedef std::set<BaseMutex*>::const_iterator It;
    os << "(Contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
    os << "(Never contended)\n";
    for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
      BaseMutex* mutex = *it;
      if (!mutex->HasEverContended()) {
        mutex->Dump(os);
        os << "\n";
      }
    }
  }
}

void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == NULL) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this) << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        if (held_mutex != NULL) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
}

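// Adds a blocked interval to the 64-bit wait_time total via a read/CAS retry loop. QuasiAtomic
// is used rather than a plain atomic add because lock-free 64-bit operations are not available
// on all 32-bit targets; QuasiAtomic is assumed to fall back to a guarded implementation where
// needed (see atomic.h).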
inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    uint64_t new_val, old_val;
    volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
    volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
    do {
      old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
      new_val = old_val + value;
    } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
  }
}

void BaseMutex::RecordContention(uint64_t blocked_tid,
                                 uint64_t owner_tid,
                                 uint64_t nano_time_blocked) {
  if (kLogLockContentions) {
    ContentionLogData* data = contetion_log_data_;
    ++(data->contention_count);
    data->AddToWaitTime(nano_time_blocked);
    ContentionLogEntry* log = data->contention_log;
    // This code is intentionally racy as it is only used for diagnostics.
    uint32_t slot = data->cur_content_log_entry;
    if (log[slot].blocked_tid == blocked_tid &&
        log[slot].owner_tid == owner_tid) {
      ++log[slot].count;
    } else {
      uint32_t new_slot;
      do {
        slot = data->cur_content_log_entry;
        new_slot = (slot + 1) % kContentionLogSize;
      } while (!data->cur_content_log_entry.compare_and_swap(slot, new_slot));
      log[new_slot].blocked_tid = blocked_tid;
      log[new_slot].owner_tid = owner_tid;
      log[new_slot].count = 1;
    }
  }
}

void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contetion_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time;
    uint32_t contention_count = data->contention_count;
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " times, average wait of contender " << PrettyDuration(wait_time / contention_count);
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      typedef SafeMap<uint64_t, size_t>::const_iterator It;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count;
        if (count > 0) {
          It it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (It it = most_common_blocked.begin(); it != most_common_blocked.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      max_tid = 0;
      max_tid_count = 0;
      for (It it = most_common_blocker.begin(); it != most_common_blocker.end(); ++it) {
        if (it->second > max_tid_count) {
          max_tid = it->first;
          max_tid_count = it->second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}


Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), recursive_(recursive), recursion_count_(0) {
#if ART_USE_FUTEXES
  state_ = 0;
  exclusive_owner_ = 0;
  num_contenders_ = 0;
#elif defined(__BIONIC__) || defined(__APPLE__)
  // Use recursive mutexes on bionic and Apple: their non-recursive mutexes don't record an
  // owner TID, which we need in order to check lock ownership.
  pthread_mutexattr_t attributes;
  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
#endif
}

Mutex::~Mutex() {
#if ART_USE_FUTEXES
  if (state_ != 0) {
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
    LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
  } else {
    CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
    CHECK_EQ(num_contenders_, 0) << "unexpectedly found a contender on mutex " << name_;
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}

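// Futex-based Mutex state encoding (ART_USE_FUTEXES): state_ is 0 when unheld and 1 when held.
// Contenders increment num_contenders_ and sleep on state_ via FUTEX_WAIT; unlock CASes state_
// back to 0 and issues FUTEX_WAKE only when num_contenders_ > 0, so uncontended unlocks avoid
// the syscall.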
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = android_atomic_acquire_cas(0, 1, &state_) == 0;
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        android_atomic_inc(&num_contenders_);
        if (futex(&state_, FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        android_atomic_dec(&num_contenders_);
      }
    } while (!done);
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
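
// Callers typically hold a Mutex through the scoped MutexLock wrapper, as the destructors in
// this file do, rather than pairing ExclusiveLock/ExclusiveUnlock manually, e.g.:
//   {
//     MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
//     // ...critical section; unlocked automatically at scope exit.
//   }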

bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 0) {
        // Change state from 0 to 1.
        done = android_atomic_acquire_cas(0, 1, &state_) == 0;
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_, 1);
    exclusive_owner_ = SafeGetTid(self);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}

void Mutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertHeld(self);
  recursion_count_--;
  if (!recursive_ || recursion_count_ == 0) {
    if (kDebugLocking) {
      CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: "
          << name_ << " " << recursion_count_;
    }
    RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_;
      if (cur_state == 1) {
        // We're no longer the owner.
        exclusive_owner_ = 0;
        // Change state to 0.
        done = android_atomic_release_cas(cur_state, 0, &state_) == 0;
        if (done) {  // The CAS may fail spuriously; only wake a contender on success.
          // Wake a contender.
          if (num_contenders_ > 0) {
            futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
          }
        }
      } else {
        // Logging acquires the logging lock, avoid infinite recursion in that case.
        if (this != Locks::logging_lock_) {
          LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
        } else {
          LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
          LogMessage::LogLine(data, StringPrintf("Unexpected state_ %d in unlock for %s",
                                                 cur_state, name_).c_str());
          _exit(1);
        }
      }
    } while (!done);
#else
    CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
  }
}

bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Debug check: if we think the mutex is held, it must appear in our set of held mutexes.
    if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

uint64_t Mutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  return exclusive_owner_;
#elif defined(__BIONIC__)
  return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
#elif defined(__GLIBC__)
  return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
#elif defined(__APPLE__)
  const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
  pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
  // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
  // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
  if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
}

void Mutex::Dump(std::ostream& os) const {
  os << (recursive_ ? "recursive " : "non-recursive ")
      << name_
      << " level=" << static_cast<int>(level_)
      << " rec=" << recursion_count_
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}

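// Futex-based ReaderWriterMutex state encoding (ART_USE_FUTEXES): state_ is 0 when unheld, -1
// when held exclusively by a writer, and a positive reader count when held shared. Waiters
// sleep on state_ via FUTEX_WAIT and are woken when the lock is released.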
ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
    : BaseMutex(name, level)
#if ART_USE_FUTEXES
    , state_(0), exclusive_owner_(0), num_pending_readers_(0), num_pending_writers_(0)
#endif
{  // NOLINT(whitespace/braces)
#if !ART_USE_FUTEXES
  CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, NULL));
#endif
}

ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_, 0);
  CHECK_EQ(exclusive_owner_, 0U);
  CHECK_EQ(num_pending_readers_, 0);
  CHECK_EQ(num_pending_writers_, 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // TODO: should we just not log at all if shutting down? this could be the logging mutex!
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = runtime == NULL || runtime->IsShuttingDown();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}

void ReaderWriterMutex::ExclusiveLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done = android_atomic_acquire_cas(0, -1, &state_) == 0;
    } else {
      // Failed to acquire, hang up.
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      android_atomic_inc(&num_pending_writers_);
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      android_atomic_dec(&num_pending_writers_);
    }
  } while (!done);
  DCHECK_EQ(state_, -1);
  exclusive_owner_ = SafeGetTid(self);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
}

void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertExclusiveHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state == -1) {
      // We're no longer the owner.
      exclusive_owner_ = 0;
      // Change state from -1 to 0.
      done = android_atomic_release_cas(-1, 0, &state_) == 0;
      if (done) {  // The CAS may fail spuriously; only wake waiters on success.
        // Wake any waiters.
        if (num_pending_readers_ > 0 || num_pending_writers_ > 0) {
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  timespec end_abs_ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &end_abs_ts);
  do {
    int32_t cur_state = state_;
    if (cur_state == 0) {
      // Change state from 0 to -1.
      done = android_atomic_acquire_cas(0, -1, &state_) == 0;
    } else {
      // Failed to acquire, hang up.
      timespec now_abs_ts;
      InitTimeSpec(true, CLOCK_REALTIME, 0, 0, &now_abs_ts);
      timespec rel_ts;
      if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
        return false;  // Timed out.
      }
      ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
      android_atomic_inc(&num_pending_writers_);
      if (futex(&state_, FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
        if (errno == ETIMEDOUT) {
          android_atomic_dec(&num_pending_writers_);
          return false;  // Timed out.
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
          // EAGAIN and EINTR both indicate a spurious failure,
          // recompute the relative time out from now and try again.
          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
          PLOG(FATAL) << "timed futex wait failed for " << name_;
        }
      }
      android_atomic_dec(&num_pending_writers_);
    }
  } while (!done);
  exclusive_owner_ = SafeGetTid(self);
#else
  timespec ts;
  InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts);
  int result = pthread_rwlock_timedwrlock(&rwlock_, &ts);
  if (result == ETIMEDOUT) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertExclusiveHeld(self);
  return true;
}
#endif

bool ReaderWriterMutex::SharedTryLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (cur_state >= 0) {
      // Add as an extra reader.
      done = android_atomic_acquire_cas(cur_state, cur_state + 1, &state_) == 0;
    } else {
      // Owner holds it exclusively.
      return false;
    }
  } while (!done);
#else
  int result = pthread_rwlock_tryrdlock(&rwlock_);
  if (result == EBUSY) {
    return false;
  }
  if (result != 0) {
    errno = result;
    PLOG(FATAL) << "pthread_rwlock_tryrdlock failed for " << name_;
  }
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
  return true;
}

bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Debug check: if the pthread-level state says we own the lock, the Thread must agree.
    if (self != NULL && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result;
  if (UNLIKELY(self == NULL)) {  // Handle unattached threads.
    result = IsExclusiveHeld(self);  // TODO: a better best effort here.
  } else {
    result = (self->GetHeldMutex(level_) == this);
  }
  return result;
}

uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_;
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_;
  }
#else
#if defined(__BIONIC__)
  return rwlock_.writerThreadId;
#elif defined(__GLIBC__)
  return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
#elif defined(__APPLE__)
  const darwin_pthread_rwlock_t*
      dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
  pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
  if (owner == (pthread_t)0) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
#endif
}

void ReaderWriterMutex::Dump(std::ostream& os) const {
  os << name_
      << " level=" << static_cast<int>(level_)
      << " owner=" << GetExclusiveOwnerTid() << " ";
  DumpContention(os);
}

std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}

ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
    : name_(name), guard_(guard) {
#if ART_USE_FUTEXES
  sequence_ = 0;
  num_waiters_ = 0;
#else
  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, NULL));
#endif
}

ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_ != 0) {
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
    LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
    PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}

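// Broadcast wakes no threads directly: it bumps sequence_ and uses FUTEX_CMP_REQUEUE to move
// all waiters from sequence_ onto the guard mutex's state_ word, so they are woken one at a
// time as the mutex is unlocked. Per futex(2), the wake count is 0 and the timespec argument
// is reinterpreted as the requeue limit (int32_t max here); the operation fails with EAGAIN
// (and is retried) if sequence_ no longer equals cur_sequence.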
void ConditionVariable::Broadcast(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  // TODO: enable below, there's a race in thread creation that causes false failures currently.
  // guard_.AssertExclusiveHeld(self);
  DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    android_atomic_inc(&sequence_);  // Indicate the broadcast occurred.
    bool done = false;
    do {
      int32_t cur_sequence = sequence_;
      // Requeue waiters onto the mutex. Each waiter has already raised the mutex's contender
      // count, ensuring that unlocks of the mutex will awaken the requeued waiter threads.
      done = futex(&sequence_, FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(std::numeric_limits<int32_t>::max()),
                   &guard_.state_, cur_sequence) != -1;
      if (!done) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex cmp requeue failed for " << name_;
        }
      }
    } while (!done);
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
#endif
}

void ConditionVariable::Signal(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
  if (num_waiters_ > 0) {
    android_atomic_inc(&sequence_);  // Indicate a signal occurred.
    // Futex wake one waiter, who will then come in and contend for the mutex. It'd be nice to
    // requeue them to avoid this, but requeueing can only move all waiters.
    int num_woken = futex(&sequence_, FUTEX_WAKE, 1, NULL, NULL, 0);
    // Check that a waiter was woken; zero means we changed sequence_ before the waiter had a
    // chance to wait.
    CHECK((num_woken == 0) || (num_woken == 1));
  }
#else
  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
#endif
}

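// Waits are normally performed in a loop that rechecks the condition, since both the futex and
// pthread_cond_wait paths permit spurious wakeups. A sketch (cond and lock are illustrative):
//   MutexLock mu(self, lock);
//   while (!cond) {
//     cond_var.Wait(self);
//   }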
void ConditionVariable::Wait(Thread* self) {
  guard_.CheckSafeToWait(self);
  WaitHoldingLocks(self);
}

void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  android_atomic_inc(&guard_.num_contenders_);
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(&sequence_, FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
    // Futex failed; check that it is an expected error.
    // EAGAIN == EWOULDBLOCK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  android_atomic_dec(&guard_.num_contenders_);
#else
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
#endif
  guard_.recursion_count_ = old_recursion_count;
}

void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
  DCHECK(self == NULL || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  guard_.CheckSafeToWait(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  timespec rel_ts;
  InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts);
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  android_atomic_inc(&guard_.num_contenders_);
  guard_.recursion_count_ = 1;
  int32_t cur_sequence = sequence_;
  guard_.ExclusiveUnlock(self);
  if (futex(&sequence_, FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
    if (errno == ETIMEDOUT) {
      // Timed out; we're done.
    } else if ((errno == EAGAIN) || (errno == EINTR)) {
      // A signal or ConditionVariable::Signal/Broadcast has come in.
    } else {
      PLOG(FATAL) << "timed futex wait failed for " << name_;
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require wakeups from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_, 0);
  android_atomic_dec(&guard_.num_contenders_);
#else
#ifdef HAVE_TIMEDWAIT_MONOTONIC
#define TIMEDWAIT pthread_cond_timedwait_monotonic
  int clock = CLOCK_MONOTONIC;
#else
#define TIMEDWAIT pthread_cond_timedwait
  int clock = CLOCK_REALTIME;
#endif
  guard_.recursion_count_ = 0;
  timespec ts;
  InitTimeSpec(true, clock, ms, ns, &ts);
  int rc = TEMP_FAILURE_RETRY(TIMEDWAIT(&cond_, &guard_.mutex_, &ts));
  if (rc != 0 && rc != ETIMEDOUT) {
    errno = rc;
    PLOG(FATAL) << "TimedWait failed for " << name_;
  }
#endif
  guard_.recursion_count_ = old_recursion_count;
}

}  // namespace art