/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE Mutex;
class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex at a level higher than or
// equal to the level of any lock the thread already holds is invalid. The hierarchy imposes a
// cycle-free partial ordering on lock acquisition and thereby causes would-be deadlocks to fail
// the debug-build checks instead of hanging. An illustrative example follows the enum below.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kTransactionLogLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kJitCodeCacheLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kArenaPoolLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kTracingUniqueMethodsLock,
  kTracingStreamingLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kMethodVerifiersLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpShutdownLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
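
// An illustrative sketch of the hierarchy rule (the locks named here are the real globals declared
// later in this file, but the snippet itself is only an example): a thread already holding
// Locks::thread_list_lock_ (kThreadListLock) may still acquire Locks::thread_suspend_count_lock_
// (kThreadSuspendCountLock), because the latter sits at a lower level:
//
//   Thread* self = Thread::Current();
//   MutexLock tl(self, *Locks::thread_list_lock_);            // Level kThreadListLock.
//   MutexLock sc(self, *Locks::thread_suspend_count_lock_);   // OK: strictly lower level.
//
// Taking the same two locks in the opposite order would trip the lock level check in debug builds.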

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Returns true if exclusive access was acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};
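
// A minimal usage sketch (illustrative only: the lock name and guarded state are assumptions, the
// calls are the API declared above). Prefer the scoped MutexLock helper declared later in this
// file over raw Lock/Unlock pairs:
//
//   Mutex lock("my data lock");    // Created at kDefaultMutexLevel.
//   Thread* self = Thread::Current();
//   lock.Lock(self);
//   lock.AssertHeld(self);         // Debug-build check that we are the exclusive holder.
//   ... touch the guarded state ...
//   lock.Unlock(self);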

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A flaw in relation to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | SharedLock(1)    | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | SharedLock(n+1)* | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire share of ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != null.
      CHECK(IsSharedHeld(self) || self == nullptr) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-inline path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusively; a positive value means shared-held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};
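
// An illustrative usage sketch (the lock name and guarded data are assumptions, the calls are the
// API declared above): readers may overlap each other, a writer excludes everyone.
//
//   ReaderWriterMutex rw_lock("table lock");
//   Thread* self = Thread::Current();
//   rw_lock.SharedLock(self);       // Reader: runs concurrently with other readers.
//   ... read the guarded table ...
//   rw_lock.SharedUnlock(self);
//   rw_lock.ExclusiveLock(self);    // Writer: blocks until all readers and writers have left.
//   ... mutate the guarded table ...
//   rw_lock.ExclusiveUnlock(self);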

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
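
// A sketch of the usual wait loop (all names below are illustrative, the calls are the API
// declared above), assuming a flag guarded by the ConditionVariable's guard Mutex, with self being
// the current Thread*:
//
//   Mutex lock("work lock");
//   ConditionVariable cond("work available condition", lock);
//   bool work_available = false;  // Guarded by lock.
//
//   // Waiting thread:
//   MutexLock mu(self, lock);
//   while (!work_available) {
//     cond.Wait(self);  // Releases lock while blocked and re-acquires it before returning.
//   }
//
//   // Signalling thread: with lock held, set work_available = true and call cond.Signal(self).
//
// The predicate is re-tested in a loop to cope with spurious wake-ups and with Broadcast waking
// several waiters at once.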

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
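
// A combined usage sketch for the scoped lockers above (the scopes and the chosen locks are
// illustrative; the global locks are declared in the Locks class below):
//
//   {
//     MutexLock mu(self, *Locks::runtime_shutdown_lock_);           // Exclusive for the scope.
//     ...
//   }                                                               // Unlocked on scope exit.
//   {
//     ReaderMutexLock mu(self, *Locks::mutator_lock_);              // Shared (reader) access.
//     ...
//   }
//   {
//     WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);  // Exclusive (writer) access.
//     ...
//   }
//
// Note that the variable name is required: "MutexLock(lock);" declares nothing and is rejected by
// the static_assert macros above.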

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();
  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves, which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily give the exclusive user (e.g. the garbage collector) a
  // chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g for debugging) won't try      |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
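
  // An illustrative sketch (not the runtime's actual suspension code): a thread that needs to
  // touch heap objects for the duration of a scope holds a share of the mutator lock, e.g.
  //
  //   ReaderMutexLock mu(self, *Locks::mutator_lock_);
  //   ... read or write heap objects ...
  //
  // whereas the exclusive user (e.g. the garbage collector) takes it with a WriterMutexLock when
  // all mutators must be stopped. Real mutator code obtains and drops its share through the
  // kRunnable/kSuspended state transitions sketched above rather than by locking it directly.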

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
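
  // For example, a hypothetical subsystem (shown only for illustration) would declare its
  // default-level lock as:
  //
  //   class MySubsystem {
  //    public:
  //     MySubsystem() : lock_("MySubsystem lock") {}
  //    private:
  //     Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  //   };
  //
  // so annotalysis can flag attempts to acquire classlinker_classes_lock_ (or anything ordered
  // before it) while lock_ is held.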

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_