/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_

#include <inttypes.h>

#include "mutex.h"

#include "base/stringprintf.h"
#include "base/value_object.h"
#include "runtime.h"
#include "thread.h"
#include "utils.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)

namespace art {

#if ART_USE_FUTEXES
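// Thin wrapper around the futex system call. There is no libc wrapper for futex(2), so all futex
// operations here are issued directly through syscall() with SYS_futex.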
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES

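// Returns the tid of |self| when a Thread is attached; otherwise asks the OS for the calling
// thread's tid, so callers get a usable id even before Thread::Current is established.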
static inline uint64_t SafeGetTid(const Thread* self) {
  if (self != nullptr) {
    return static_cast<uint64_t>(self->GetTid());
  } else {
    return static_cast<uint64_t>(GetTid());
  }
}

static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    Runtime* runtime = Runtime::Current();
    CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock; during this process they
          // no longer exist, so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // Avoid recursive death.
          level == kAbortLock) << level;
  }
}

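// Records this mutex in |self|'s held-mutex table (monitors excepted). With kDebugLocking, any
// mutex already held at the same or a lower level is first reported as a lock level violation.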
inline void BaseMutex::RegisterAsLocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    // Check if a bad Mutex of this level or lower is held.
    bool bad_mutexes_held = false;
    for (int i = level_; i >= 0; --i) {
      BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
      if (UNLIKELY(held_mutex != nullptr)) {
        LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                   << "(level " << LockLevel(i) << " - " << i
                   << ") while locking \"" << name_ << "\" "
                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
        if (i > kAbortLock) {
          // Only abort in the check below if this lock is above the abort level.
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
  // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
  // the monitor list.
  if (level_ != kMonitorLock) {
    self->SetHeldMutex(level_, this);
  }
}

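// Clears the held-mutex entry for this level. With kDebugLocking, verifies that the mutex being
// released is the one recorded as held at this level.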
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (level_ != kMonitorLock) {
    if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
    }
    self->SetHeldMutex(level_, nullptr);
  }
}

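// Futex path: state_ is the reader count when >= 0 and is negative while exclusively held, so a
// reader acquires by CAS-incrementing a non-negative state and otherwise waits on the contended
// lock in HandleSharedLockContention before retrying.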
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note: the relaxed loads below mustn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_pending_writers_.LoadRelaxed() > 0 ||
            num_pending_readers_.LoadRelaxed() > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity debug check that if we think it is locked we have it in our held mutexes.
    if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline uint64_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_;
}

inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity check that if the pthread thinks we own the lock, the Thread agrees.
    if (self != nullptr && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.LoadRelaxed();
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_;
  }
#else
  return exclusive_owner_;
#endif
}

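// The mutator lock is logically held shared for the whole time a thread is runnable. These
// transitions only perform the per-thread bookkeeping and assertions that SharedLock/SharedUnlock
// would otherwise do; the underlying ownership change is tied to the thread state transition.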
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}

inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_INL_H_