/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_

#include <inttypes.h>

#include "mutex.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "cutils/trace.h"

#include "base/stringprintf.h"
#include "runtime.h"
#include "thread.h"

namespace art {

#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
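// Thin wrapper around the raw futex(2) system call: the C library does not export a futex()
// function, so we go through syscall(2) directly. Used below for FUTEX_WAIT and FUTEX_WAKE.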
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES

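// RAII helper that records how long a thread was blocked on a contended lock. The constructor
// notes the start time (and opens an ATRACE event); the destructor reports the blocked duration
// to the mutex's contention log when kLogLockContentions is enabled.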
class ScopedContentionRecorder {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : NULL),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATRACE_ENABLED()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATRACE_BEGIN(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    ATRACE_END();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

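// Returns the thread id of |self|, or of the calling thread when |self| is null (for example a
// thread that has not yet attached to, or has already detached from, the runtime).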
static inline uint64_t SafeGetTid(const Thread* self) {
  if (self != NULL) {
    return static_cast<uint64_t>(self->GetTid());
  } else {
    return static_cast<uint64_t>(GetTid());
  }
}

static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking the shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    Runtime* runtime = Runtime::Current();
    CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDTs are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock; during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging, which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // Avoid recursive death.
          level == kAbortLock) << level;
  }
}

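// Records that |self| now holds this mutex. In debug builds this also enforces the lock level
// ordering: holding any mutex of the same or a lower level while acquiring this one is reported
// as a violation (and aborts, except for violations at or below the abort level).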
inline void BaseMutex::RegisterAsLocked(Thread* self) {
  if (UNLIKELY(self == NULL)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    // Check if a bad Mutex of this level or lower is held.
    bool bad_mutexes_held = false;
    for (int i = level_; i >= 0; --i) {
      BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
      if (UNLIKELY(held_mutex != NULL)) {
        LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                   << "(level " << LockLevel(i) << " - " << i
                   << ") while locking \"" << name_ << "\" "
                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
        if (i > kAbortLock) {
          // Only abort in the check below if this is more than the abort-level lock.
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
  // Don't record monitors as they are outside the scope of analysis. They may be inspected off
  // the monitor list.
  if (level_ != kMonitorLock) {
    self->SetHeldMutex(level_, this);
  }
}

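// Records that |self| no longer holds this mutex; the counterpart of RegisterAsLocked().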
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
  if (UNLIKELY(self == NULL)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (level_ != kMonitorLock) {
    if (kDebugLocking && !gAborting) {
      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
    }
    self->SetHeldMutex(level_, NULL);
  }
}

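// Shared (reader) acquisition. With ART_USE_FUTEXES, state_ encodes the lock state: 0 means
// unlocked, a positive value is the number of shared holders, and a negative value means an
// exclusive holder. Readers CAS the count upwards on the fast path and futex-wait while a
// writer holds the lock.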
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Owner holds it exclusively, so block until woken.
      ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
      ++num_pending_readers_;
      if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      --num_pending_readers_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note: the relaxed loads below mustn't be reordered before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_pending_writers_.LoadRelaxed() > 0 ||
            num_pending_readers_.LoadRelaxed() > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_: " << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

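// Typical callers do not invoke SharedLock()/SharedUnlock() directly. A minimal usage sketch,
// assuming the ReaderMutexLock RAII helper declared in mutex.h (the lock name below is
// hypothetical):
//
//   ReaderMutexLock mu(self, *some_reader_writer_lock);  // takes the shared lock
//   // ... read-only critical section ...
//   // SharedUnlock() runs automatically when |mu| goes out of scope.
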
inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity debug check that, if we think it is locked, we have it in our held mutexes.
    if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline uint64_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_;
}

inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity check that if the lock implementation thinks we own the lock, the Thread agrees.
    if (self != NULL && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

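// Returns the owner's tid when the lock is held exclusively, 0 when it is unlocked, and
// (uint64_t)-1 when it is held in shared mode (there is no single owner to report).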
inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.LoadRelaxed();
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_;
  }
#else
  return exclusive_owner_;
#endif
}

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_INL_H_