/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic_integer.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
#include "locks.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES !defined(__mips__)
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class ScopedContentionRecorder;
class Thread;

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 64;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    volatile uint64_t wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contetion_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contetion_log_data_->contention_count > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
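//
// Example usage (a sketch; "lock_" and "self" are placeholders for a Mutex member and the
// current Thread*, not names defined in this file):
//
//   lock_.ExclusiveLock(self);    // Blocks until lock_ is Free, then takes exclusive ownership.
//   ...                           // Critical section guarded by lock_.
//   lock_.ExclusiveUnlock(self);  // Returns lock_ to the Free state.
//
// Prefer the scoped MutexLock helper declared later in this file, which pairs the lock and
// unlock automatically.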
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until the mutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if exclusive access was acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with the exclusive owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is usually better to use
  // AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  volatile int32_t state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  volatile int32_t num_contenders_;
#else
  pthread_mutex_t mutex_;
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. One drawback relative to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
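//
// Example usage (a sketch; "map_lock_" and "self" are placeholders for a ReaderWriterMutex
// member and the current Thread*, not names defined in this file):
//
//   map_lock_.SharedLock(self);      // Any number of readers may hold the lock concurrently.
//   ...                              // Read-only access to the guarded state.
//   map_lock_.SharedUnlock(self);
//
//   map_lock_.ExclusiveLock(self);   // Blocks until there is no reader or writer.
//   ...                              // Mutating access.
//   map_lock_.ExclusiveUnlock(self);
//
// The scoped ReaderMutexLock and WriterMutexLock helpers declared later in this file are the
// preferred way to express this.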
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until the ReaderWriterMutex is free, then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until the ReaderWriterMutex is free and acquire exclusive access. Returns true on
  // success or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until the ReaderWriterMutex is shared or free, then acquire a share of the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex?
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex?
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with the exclusive owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // -1 implies held exclusively; a positive value means held shared by state_ many owners.
  volatile int32_t state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Pending readers.
  volatile int32_t num_pending_readers_;
  // Pending writers.
  volatile int32_t num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
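//
// Typical wait pattern (a sketch; "lock_", "cond_" and "ready_" are placeholders for a guard
// Mutex, a ConditionVariable constructed over it, and the guarded predicate; none of them are
// names defined in this file):
//
//   {
//     MutexLock mu(self, lock_);
//     while (!ready_) {
//       cond_.Wait(self);  // Releases lock_ while blocked and reacquires it before returning.
//     }
//   }
//
// The predicate is rechecked in a loop because a Broadcast (or a spurious wakeup) does not
// guarantee that the condition still holds once the waiter reacquires the guard Mutex.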
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  volatile int32_t sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
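
// Example usage (a sketch; "lock_" and "self" are placeholders for a Mutex member and the
// current Thread*, not names defined in this file):
//
//   {
//     MutexLock mu(self, lock_);  // ExclusiveLock(self) in the constructor.
//     ...                         // lock_ is held for the remainder of the scope.
//   }                             // ExclusiveUnlock(self) in the destructor.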

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
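
// Example usage of the scoped ReaderWriterMutex helpers (a sketch; "map_lock_" and "self" are
// placeholders for a ReaderWriterMutex member and the current Thread*, not names defined in
// this file):
//
//   {
//     ReaderMutexLock mu(self, map_lock_);  // SharedLock in the constructor.
//     ...                                   // Read-only access to the guarded state.
//   }                                       // SharedUnlock in the destructor.
//   {
//     WriterMutexLock mu(self, map_lock_);  // ExclusiveLock in the constructor.
//     ...                                   // Mutating access.
//   }                                       // ExclusiveUnlock in the destructor.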

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_