/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class SHARED_LOCKABLE ReaderWriterMutex;
class SHARED_LOCKABLE MutatorMutex;
class ScopedContentionRecorder;
class Thread;
// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
// than or equal to any lock the thread already holds is invalid. The lock hierarchy achieves a
// cycle-free partial ordering, and thereby causes would-be deadlocks to fail the checks instead
// of deadlocking.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpAdbStateLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kMarkSweepMarkStackLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kTaggingLockLevel,
  kTransactionLogLock,
  kJniFunctionTableLock,
  kJniWeakGlobalsLock,
  kJniGlobalsLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitDebugInterfaceLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kHostDlOpenHandlesLock,
  kVerifierDepsLock,
  kOatFileManagerLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDeoptimizedMethodsLock,
  kClassLoaderClassesLock,
  kDefaultMutexLevel,
  kDexLock,
  kMarkSweepLargeObjectLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,  // TODO rename.
  kJitCodeCacheLock,
  kCHALock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
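
// For illustration only (a hypothetical sketch, not code from this header): with the hierarchy
// above, a thread may only acquire locks at levels strictly below those it already holds, so in
// debug builds the following ordering passes the checks while the reverse order would CHECK-fail:
//
//   Thread* self = Thread::Current();
//   Mutex outer("outer", kThreadListLock);  // Higher level (later in the enum).
//   Mutex inner("inner", kAbortLock);       // Lower level (earlier in the enum).
//   outer.ExclusiveLock(self);              // OK: no locks held yet.
//   inner.ExclusiveLock(self);              // OK: kAbortLock < kThreadListLock.
//   inner.ExclusiveUnlock(self);
//   outer.ExclusiveUnlock(self);            // Acquiring outer while holding inner would fail.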

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
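
// Illustrative sketch (assumes kLogLockContentions has been flipped to true in a local build):
// contention statistics then accumulate per mutex and can be dumped in aggregate, e.g.:
//
//   std::ostringstream oss;
//   BaseMutex::DumpAll(oss);  // Includes the contention logs of every registered mutex.
//   LOG(INFO) << oss.str();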

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }
  virtual bool IsMutatorMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

  bool ShouldRespondToEmptyCheckpointRequest() const {
    return should_respond_to_empty_checkpoint_request_;
  }

  void SetShouldRespondToEmptyCheckpointRequest(bool value) {
    should_respond_to_empty_checkpoint_request_ = value;
  }

  virtual void WakeupToRespondToEmptyCheckpoint() = 0;

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;
  bool should_respond_to_empty_checkpoint_request_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant by default (see the recursive constructor argument), so an attempt to
//   ExclusiveLock again on the same thread will result in an error. Being non-reentrant
//   simplifies waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Returns true if it acquires exclusive access, false otherwise.
  bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
  bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) {
    AssertNotHeldExclusive(self);
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; prefer AssertHeld/AssertNotHeld instead.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const Mutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
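
// A minimal usage sketch (illustrative only; "data_lock" is hypothetical). The MutexLock RAII
// wrapper declared further below is generally preferable to raw Lock/Unlock calls:
//
//   Mutex data_lock("data lock");    // Defaults to kDefaultMutexLevel.
//   Thread* self = Thread::Current();
//   data_lock.ExclusiveLock(self);   // Free -> Exclusive; blocks while another thread holds it.
//   //  ... mutate the guarded state ...
//   data_lock.AssertExclusiveHeld(self);  // Debug-build sanity check.
//   data_lock.ExclusiveUnlock(self);      // Exclusive -> Free.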

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. One limitation compared to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) ACQUIRE();
  void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) RELEASE();
  void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE;
  void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex. Returns true on success, false otherwise.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
    AssertNotExclusiveHeld(self);
  }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) {
    AssertSharedHeld(self);
  }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

  // For negative capabilities in clang annotations.
  const ReaderWriterMutex& operator!() const { return *this; }

  void WakeupToRespondToEmptyCheckpoint() OVERRIDE;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusively; a positive value means shared, held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
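
// Sketch of shared vs. exclusive access (illustrative only; names are hypothetical):
//
//   ReaderWriterMutex table_lock("table lock");
//   Thread* self = Thread::Current();
//   table_lock.SharedLock(self);       // Shared(n): concurrent readers are permitted.
//   //  ... read the guarded table ...
//   table_lock.SharedUnlock(self);
//   table_lock.ExclusiveLock(self);    // Blocks until there is no writer and no readers.
//   //  ... mutate the guarded table ...
//   table_lock.ExclusiveUnlock(self);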

// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the
// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that
// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly
// held by any mutator threads. However, a thread in the kRunnable state is considered to have
// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable
// state have associated implications on lock ownership. Extra methods to handle the state
// transitions have been added to the interface, but they are accessible only to the code that
// performs those transitions (Thread is a friend). The thread state and flags attributes are
// used to ensure thread state transitions are consistent with the permitted behaviour of the
// mutex.
//
// *) The most important consequence of this behaviour is that all threads must be in one of the
// suspended states before exclusive ownership of the mutator mutex is sought.
//
std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu);
class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex {
 public:
  explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel)
    : ReaderWriterMutex(name, level) {}
  ~MutatorMutex() {}

  virtual bool IsMutatorMutex() const { return true; }

  // For negative capabilities in clang annotations.
  const MutatorMutex& operator!() const { return *this; }

 private:
  friend class Thread;
  void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;

  DISALLOW_COPY_AND_ASSIGN(MutatorMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex, as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
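
// Typical wait-loop sketch (illustrative only; the guard mutex and predicate are hypothetical).
// Wait() releases guard_ while sleeping and re-acquires it before returning, so the predicate
// must be re-tested after every wakeup:
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue non-empty condition", queue_lock);
//
//   // Consumer:
//   queue_lock.ExclusiveLock(self);
//   while (queue.empty()) {        // Loop: wakeups may be spurious or raced.
//     queue_cond.Wait(self);
//   }
//   //  ... pop from queue ...
//   queue_lock.ExclusiveUnlock(self);
//
//   // Producer: push while holding queue_lock, then queue_cond.Signal(self) or Broadcast(self).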

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_CAPABILITY MutexLock {
 public:
  MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() RELEASE() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
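
// Intended usage sketch (illustrative; "some_lock" is hypothetical):
//
//   {
//     MutexLock mu(Thread::Current(), some_lock);  // Acquired on construction.
//     //  ... critical section ...
//   }                                              // Released when mu goes out of scope.
//
// Note that "MutexLock (some_lock);" would merely declare a variable named some_lock and lock
// nothing; the macro above turns that mistake into an explicit compile-time error.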

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
 public:
  ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() RELEASE() ALWAYS_INLINE {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_CAPABILITY WriterMutexLock {
 public:
  WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
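
// Combined usage sketch for the two wrappers (illustrative; names are hypothetical):
//
//   ReaderWriterMutex cache_lock("cache lock");
//   {
//     ReaderMutexLock mu(self, cache_lock);  // Shared access: lookups only.
//     //  ... read from the cache ...
//   }
//   {
//     WriterMutexLock mu(self, cache_lock);  // Exclusive access: safe to mutate.
//     //  ... update the cache ...
//   }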

// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
 public:
  void Acquire() ACQUIRE() {}
  void Release() RELEASE() {}
  const Role& operator!() const { return *this; }
};

class Uninterruptible : public Role {
};

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.

  // Destroying various lock types can emit errors that vary depending upon
  // whether the client (art::Runtime) is currently active.  Allow the client
  // to set a callback that is used to check when it is acceptable to call
  // Abort.  The default behavior is that the client *is not* able to call
  // Abort if no callback is established.
  using ClientCallback = bool();
  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
  // Checks for whether it is safe to call Abort() without using locks.
  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
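
  // Usage sketch (illustrative; the callback body is hypothetical). The client installs a
  // callback once so that later lock-teardown errors only call Abort when that is safe:
  //
  //   static bool IsSafeToAbort() { return runtime_is_active; }
  //   Locks::SetClientCallback(&IsSafeToAbort);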

  // Add a mutex to expected_mutexes_on_weak_ref_access_.
  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
  // transition to the suspended state. The GC/Debugger thread will be woken up when all mutator
  // threads are suspended.
  //
  // Thread suspension:
  // mutator thread                                | GC/Debugger
  //   .. running ..                               |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block wait for all threads to pass a barrier
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
  // x: Acquire thread_suspend_count_lock_         |   .. running ..
  // while Thread::suspend_count_ > 0              |   .. running ..
  //   - wait on Thread::resume_cond_              |   .. running ..
  //     (releases thread_suspend_count_lock_)     |   .. running ..
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Change to kRunnable                           |  .. running ..
  //  - this uses a CAS operation to ensure the    |  .. running ..
  //    suspend request flag isn't raised as the   |  .. running ..
  //    state is changed                           |  .. running ..
  //  - if the CAS operation fails then goto x     |  .. running ..
  //  .. running ..                                |  .. running ..
  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (ie traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // Guards Class Hierarchy Analysis (CHA).
  static Mutex* cha_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(cha_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
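
  // Usage sketch (illustrative; my_subsystem_lock_ is hypothetical): annotate a newly declared
  // default-level mutex so annotalysis can verify it is only taken after
  // classlinker_classes_lock_:
  //
  //   static Mutex* my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;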

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards opened oat files in OatFileManager.
  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);

  // Guards extra string entries for VerifierDeps.
  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);

  // Guards dlopen_handles_ in DlOpenOatFile.
  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Guard accesses to the JNI Global Reference table.
  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Guard accesses to the JNI Weak Global Reference table.
  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);

  // Guard accesses to the JNI function table override.
  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
  // encounter an unexpected mutex on accessing weak refs,
  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
  class ScopedExpectedMutexesOnWeakRefAccessLock;
};

class Roles {
 public:
  // Uninterruptible means that the thread may not become suspended.
  static Uninterruptible uninterruptible_;
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_