/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_

#include <inttypes.h>

#include "mutex.h"

#include "base/utils.h"
#include "base/value_object.h"
#include "thread.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)

namespace art {

#if ART_USE_FUTEXES
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES
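
// Illustrative only, not part of the runtime: the wrapper above exposes the
// six-argument futex(2) interface, so a minimal FUTEX_WAIT/FUTEX_WAKE pairing
// looks like the sketch below; `word` is a hypothetical shared flag.
//
//   volatile int word = 0;
//   // Waiter: sleeps only while word still holds the expected value 0.
//   futex(&word, FUTEX_WAIT, 0, nullptr, nullptr, 0);
//   // Waker (another thread): publish the change, then wake one waiter.
//   word = 1;
//   futex(&word, FUTEX_WAKE, 1, nullptr, nullptr, 0);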

// The following isn't strictly necessary, but we want updates on Atomic<pid_t> to be lock-free.
// TODO: Use std::atomic::is_always_lock_free after switching to C++17 atomics.
static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");

static inline pid_t SafeGetTid(const Thread* self) {
  if (self != nullptr) {
    return self->GetTid();
  } else {
    return GetTid();
  }
}

static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    CHECK(!Locks::IsSafeToCallAbortRacy() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock; during this process they
          // no longer exist, so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // When transitioning from suspended to runnable, a daemon thread might be in
          // a situation where the runtime is shutting down. To not crash our debug locking
          // mechanism we just pass null Thread* to the MutexLock during that transition
          // (see Thread::TransitionFromSuspendedToRunnable).
          level == kThreadSuspendCountLock ||
          // Avoid recursive death.
          level == kAbortLock ||
          // Locks at the absolute top of the stack can be locked at any time.
          level == kTopLockLevel) << level;
  }
}

inline void BaseMutex::RegisterAsLocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    // Check if a bad Mutex of this level or lower is held.
    bool bad_mutexes_held = false;
    // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the
    // mutator_lock_ exclusive. This is because suspending while holding locks at this level is
    // not allowed, and if we hold the mutator_lock_ exclusive we must eventually unsuspend
    // threads, so there are no deadlocks.
    if (level_ == kTopLockLevel &&
        Locks::mutator_lock_->IsSharedHeld(self) &&
        !Locks::mutator_lock_->IsExclusiveHeld(self)) {
      LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
                 << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
                 << ") non-exclusive while locking \"" << name_ << "\" "
                 << "(level " << level_ << " - " << static_cast<int>(level_) << ") a top level "
                 << "mutex. This is not allowed.";
      bad_mutexes_held = true;
    } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
      LOG(ERROR) << "Lock level violation: locking mutator_lock_ while already holding a "
                 << "kTopLockLevel mutex (\"" << self->GetHeldMutex(kTopLockLevel)->name_
                 << "\") is not allowed.";
      bad_mutexes_held = true;
    }
    for (int i = level_; i >= 0; --i) {
      LockLevel lock_level_i = static_cast<LockLevel>(i);
      BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
      if (level_ == kTopLockLevel &&
          lock_level_i == kMutatorLock &&
          Locks::mutator_lock_->IsExclusiveHeld(self)) {
        // This is checked above.
        continue;
      } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) {
        LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                   << "(level " << lock_level_i << " - " << i
                   << ") while locking \"" << name_ << "\" "
                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
        if (lock_level_i > kAbortLock) {
          // Only abort in the check below if this lock is above the abort level.
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
  // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
  // the monitor list.
  if (level_ != kMonitorLock) {
    self->SetHeldMutex(level_, this);
  }
}
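
// A sketch of the level discipline enforced above (comments only; `self` is a
// placeholder for an attached Thread* and the numeric levels are made up): a
// new acquisition must sit at a strictly lower level than every mutex already
// held, modulo the kAbortLock/kTopLockLevel/mutator_lock_ special cases.
//
//   Mutex higher("higher", static_cast<LockLevel>(5));
//   Mutex lower("lower", static_cast<LockLevel>(1));
//   higher.ExclusiveLock(self);  // OK: nothing at level 5 or below is held.
//   lower.ExclusiveLock(self);   // OK: level 1 is below level 5.
//   // Acquiring in the opposite order (lower, then higher) would be reported
//   // as a lock level violation by the loop above.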

inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (level_ != kMonitorLock) {
    if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
    }
    self->SetHeldMutex(level_, nullptr);
  }
}

inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note: the relaxed loads below mustn't be reordered before the CompareAndSet.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_pending_writers_.LoadRelaxed() > 0 ||
            num_pending_readers_.LoadRelaxed() > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity debug check that if we think it is locked we have it in our held mutexes.
    if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline pid_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_.LoadRelaxed();
}

inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
  if (kDebugLocking && (gAborting == 0)) {
    CHECK(IsExclusiveHeld(self)) << *this;
  }
}

inline void Mutex::AssertHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}

inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity check that if the pthread thinks we own the lock the Thread agrees.
    if (self != nullptr && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

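// Under ART_USE_FUTEXES the reader/writer state is packed into state_: zero
// means unlocked, a positive value counts the shared (reader) holders, and a
// negative value marks an exclusive holder, whose tid is kept in
// exclusive_owner_. The decoding below mirrors that encoding.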
inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.LoadRelaxed();
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_.LoadRelaxed();
  }
#else
  return exclusive_owner_.LoadRelaxed();
#endif
}

inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const {
  if (kDebugLocking && (gAborting == 0)) {
    CHECK(IsExclusiveHeld(self)) << *this;
  }
}

inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}

inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}

inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

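// Sketch of the intended pairing (comments only; see also
// Thread::TransitionFromSuspendedToRunnable): a runnable thread drops its
// shared hold on the mutator lock when it suspends and re-registers it when
// it resumes; only the debug bookkeeping changes, not the lock word itself.
//
//   Locks::mutator_lock_->TransitionFromRunnableToSuspended(self);
//   // ... thread is suspended; the runtime may act on it here ...
//   Locks::mutator_lock_->TransitionFromSuspendedToRunnable(self);
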
inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
    : self_(self), mu_(mu) {
  mu_.SharedLock(self_);
}

inline ReaderMutexLock::~ReaderMutexLock() {
  mu_.SharedUnlock(self_);
}

// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
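
// Typical usage (a sketch; `self` and `rw_mu` stand for an attached thread and
// some ReaderWriterMutex instance):
//
//   {
//     ReaderMutexLock mu(self, rw_mu);  // SharedLock in the constructor.
//     // ... read-only critical section ...
//   }                                   // SharedUnlock in the destructor.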

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_INL_H_