/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex at a level higher than or
// equal to that of a lock the thread already holds is invalid. The lock hierarchy establishes a
// cycle-free partial ordering and thereby causes would-be deadlocks to fail these checks instead
// of deadlocking. An illustrative sketch follows the enum below.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kLoadLibraryLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kThreadListSuspendThreadLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
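
// The following sketch is for illustration only (the mutex names and surrounding code are
// hypothetical, not part of the runtime). It shows the ordering the hierarchy enforces: a thread
// may only acquire a lock at a strictly lower level than any lock it already holds.
//
//   Mutex list_lock("list lock", kThreadListLock);
//   Mutex count_lock("count lock", kThreadSuspendCountLock);
//   ...
//   list_lock.ExclusiveLock(self);    // OK: no locks held yet.
//   count_lock.ExclusiveLock(self);   // OK: kThreadSuspendCountLock < kThreadListLock.
//   count_lock.ExclusiveUnlock(self);
//   list_lock.ExclusiveUnlock(self);
//   // Acquiring in the opposite order would trip the debug-build lock hierarchy checks.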

const bool kDebugLocking = kIsDebugBuild;

// Record lock contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false as lock contention logging is supported only with
// futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations.
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * A Mutex is not reentrant (unless constructed with recursive == true), so an attempt to
//   ExclusiveLock on the same thread results in an error. Being non-reentrant simplifies
//   Waiting on ConditionVariables.
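//
// A minimal usage sketch, for illustration only (the mutex name and the critical section are
// hypothetical); typically the scoped MutexLock helper declared later in this file is used
// rather than calling ExclusiveLock/ExclusiveUnlock directly:
//
//   Mutex my_lock("my lock");
//   {
//     MutexLock mu(Thread::Current(), my_lock);
//     // ... exclusive access to the state guarded by my_lock ...
//   }  // my_lock is released when mu goes out of scope.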
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Returns true if exclusive access is acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is better to use AssertHeld/NotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A limitation compared to a Mutex is that it cannot be used with a
// ConditionVariable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | Shared(1)        | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | Shared(n+1)*     | Shared(n-1) or Free
// * for large values of n the SharedLock may block.
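//
// A minimal usage sketch, for illustration only (the lock name and the guarded state are
// hypothetical), using the scoped ReaderMutexLock/WriterMutexLock helpers declared later in
// this file:
//
//   ReaderWriterMutex rw_lock("rw lock");
//   {
//     ReaderMutexLock mu(Thread::Current(), rw_lock);   // Shared (reader) access.
//     // ... read the guarded state ...
//   }
//   {
//     WriterMutexLock mu(Thread::Current(), rw_lock);   // Exclusive (writer) access.
//     // ... mutate the guarded state ...
//   }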
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() {  ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() {  ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // -1 implies held exclusively; a positive value indicates shared, held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
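//
// A typical wait loop, sketched for illustration only (the guard Mutex, the "ready" flag and all
// names are hypothetical):
//
//   Mutex lock("cv lock");
//   ConditionVariable cv("cv", lock);
//   ...
//   {
//     MutexLock mu(self, lock);
//     while (!ready) {
//       cv.Wait(self);  // Releases lock while waiting and re-acquires it before returning.
//     }
//   }
//   // Another thread sets "ready" while holding lock and then calls cv.Signal(self) or
//   // cv.Broadcast(self) to wake the waiter(s).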
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // the guard_ Mutex and another thread takes it and signals, the waiting thread observes that
  // sequence_ changed and doesn't enter the wait. Modified while holding guard_, but read by the
  // futex wait without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiter queue on the futex,
  // as waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();

  // There's a potential race where two threads try to suspend each other, both succeed, and both
  // then block while trying to become runnable again. This lock ensures that only one thread is
  // requesting suspension of another at any time. As the thread list suspend-thread logic
  // transitions to runnable, if the current thread is itself the target of a suspension request,
  // it blocks while holding this lock until it can safely request suspension of the other thread,
  // i.e. until that thread no longer has a pending suspension request against this one. This
  // avoids a potential deadlock cycle.
  static Mutex* thread_list_suspend_thread_lock_;

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves, which also involves usage of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic, as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily give the exclusive user (e.g. the garbage collector)
  // a chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g for debugging) won't try      |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
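  // Illustrative sketch only (not actual runtime code): a mutator thread typically runs while
  // holding a share of this lock, e.g.
  //   ReaderMutexLock mu(self, *Locks::mutator_lock_);
  // while an exclusive user first suspends the mutators via the protocol above and then takes
  // the lock exclusively, e.g.
  //   WriterMutexLock mu(self, *Locks::mutator_lock_);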

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guard the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_