/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#include <iosfwd>
#include <list>
#include <vector>

#include "base/allocator.h"
#include "base/atomic.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "read_barrier_option.h"
#include "runtime_callbacks.h"
#include "thread_state.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
typedef uint32_t MonitorId;

namespace mirror {
class Object;
}  // namespace mirror

enum class LockReason {
  kForWait,
  kForLock,
};

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly inflate
  // a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  constexpr static size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

  ~Monitor();

  static void Init(uint32_t lock_profiling_threshold, uint32_t stack_dump_lock_profiling_threshold);

  // Return the thread id of the lock owner or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(mirror::Object* obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
  static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj, bool trylock)
      EXCLUSIVE_LOCK_FUNCTION(obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
  static bool MonitorExit(Thread* thread, mirror::Object* obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj);

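  // A hedged, illustrative sketch of how a caller might pair MonitorEnter and MonitorExit,
  // assuming a current Thread* `self` and a mirror::Object* `obj`. It assumes the returned
  // pointer identifies the object whose monitor was entered (it may differ from `obj` if the
  // object moved while the caller was blocked), so the returned pointer is what gets unlocked.
  //
  //   mirror::Object* locked = Monitor::MonitorEnter(self, obj, /* trylock= */ false);
  //   // ... critical section; the mutator lock is held shared throughout ...
  //   Monitor::MonitorExit(self, locked);
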
  static void Notify(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }
  static void NotifyAll(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }

  // Object.wait().  Also called for class init.
  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
  static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

  static ThreadState FetchState(const Thread* thread,
                                /* out */ mirror::Object** monitor_object,
                                /* out */ uint32_t* lock_owner_tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static mirror::Object* GetContendedMonitor(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // The abort_on_failure flag allows us to avoid aborting when the runtime is in a disorderly
  // state. This is necessary when we have already aborted but still want to dump as much of the
  // stack as we can.
  static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context, bool abort_on_failure = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* GetObject() REQUIRES_SHARED(Locks::mutator_lock_) {
    return obj_.Read<kReadBarrierOption>();
  }

  void SetObject(mirror::Object* object);

  Thread* GetOwner() const NO_THREAD_SAFETY_ANALYSIS {
    return owner_;
  }

  int32_t GetHashCode();

  bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);

  bool HasHashCode() const {
    return hash_code_.LoadRelaxed() != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons; the caller should always
  // re-check the lock word afterwards.
  static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);

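  // A hedged, illustrative sketch of the re-check loop implied by the InflateThinLocked comment,
  // assuming a Thread* `self`, a Handle<mirror::Object> `h_obj`, and a hash code `hash`. It also
  // assumes mirror::Object::GetLockWord(bool) for re-reading the lock word, declared elsewhere.
  //
  //   LockWord lw = h_obj->GetLockWord(/* as_volatile= */ true);
  //   while (lw.GetState() == LockWord::kThinLocked) {
  //     Monitor::InflateThinLocked(self, h_obj, lw, hash);  // may fail spuriously,
  //     lw = h_obj->GetLockWord(/* as_volatile= */ true);   // so re-read and retry
  //   }
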
  // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
  // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
  // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
  static bool Deflate(Thread* self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, MonitorId id)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Install the monitor into its object; this may fail if another thread installs a different
  // monitor first.
  bool Install(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
  // routine.
  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There is a race with other
  // threads inflating the lock or installing hash codes, and inflation may fail spuriously. The
  // caller should re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self)

  void LogContentionEvent(Thread* self,
                          uint32_t wait_ms,
                          uint32_t sample_percent,
                          ArtMethod* owner_method,
                          uint32_t owner_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void FailedUnlock(mirror::Object* obj,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* mon)
      REQUIRES(!Locks::thread_list_lock_,
               !monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to lock without blocking; returns true if we acquired the lock.
  bool TryLock(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Variant for already holding the monitor lock.
  bool TryLockLocked(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<LockReason reason = LockReason::kForLock>
  void Lock(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool Unlock(Thread* thread)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.

  void Notify(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static std::string PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
  //  - blocked in join(), join(long), or join(long, int) methods of Thread
  //  - blocked in sleep(long) or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "nsec" is in the range 0-999999 (i.e. fractions of a millisecond) and
  // throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
  // a loop.  This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems.  Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
  // of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

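  // A hedged, illustrative sketch of the wait-in-a-loop pattern recommended above, expressed
  // against the public static Wait declared earlier in this class. It assumes a Thread* `self`,
  // a mirror::Object* `obj` whose monitor is already held, and a hypothetical predicate
  // `ConditionHolds()` that is protected by that monitor.
  //
  //   while (!ConditionHolds()) {
  //     // A spurious wakeup or a notification for another condition simply re-enters the loop.
  //     Monitor::Wait(self, obj, /* ms= */ 0, /* ns= */ 0,
  //                   /* interruptShouldThrow= */ true, kWaiting);
  //   }
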
  // Translates the provided method and pc into its declaring class' source file and line number.
  static void TranslateLocation(ArtMethod* method, uint32_t pc,
                                const char** source_file,
                                int32_t* line_number)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);

  // Support for systrace output of monitor operations.
  ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                              mirror::Object* obj,
                                              bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void AtraceMonitorLockImpl(Thread* self,
                                    mirror::Object* obj,
                                    bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE static void AtraceMonitorUnlock();

  static uint32_t lock_profiling_threshold_;
  static uint32_t stack_dump_lock_profiling_threshold_;

  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_);

  // Number of threads waiting on the condition.
  size_t num_waiters_ GUARDED_BY(monitor_lock_);

  // Which thread currently owns the lock?
  Thread* volatile owner_ GUARDED_BY(monitor_lock_);

  // Owner's recursive lock depth.
  int lock_count_ GUARDED_BY(monitor_lock_);

  // The object we are part of. This is a weak root. Do not access
  // this directly; use GetObject() to read it so that it is guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Method and dex pc where the lock owner acquired the lock, used when lock
  // sampling is enabled. locking_method_ may be null if the lock is currently
  // unlocked, or if the lock is acquired by the system when the stack is empty.
  ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
  uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);

  // A denser encoding of this monitor, as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);

  void SweepMonitorList(IsMarkedVisitor* visitor)
      REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
  size_t Size() REQUIRES(!monitor_list_lock_);

  typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors;

 private:
  // During sweeping we may free an object and on a separate thread have an object created using
  // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
  // If we allow new monitor registration during sweeping, this monitor may be incorrectly freed,
  // as the object wasn't marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
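
  // A hedged, illustrative sketch of how a collector might use this list around sweeping, based
  // on the comment above. The exact call sites and ordering in the GC are an assumption, not
  // something this header documents.
  //
  //   // Given a MonitorList* `monitors` and an IsMarkedVisitor* `visitor`:
  //   monitors->DisallowNewMonitors();      // registration of new monitors is now blocked
  //   monitors->SweepMonitorList(visitor);  // drop monitors whose objects were not marked
  //   monitors->AllowNewMonitors();         // re-enable and wake any blocked registrations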
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  MonitorInfo() : owner_(nullptr), entry_count_(0) {}
  MonitorInfo(const MonitorInfo&) = default;
  MonitorInfo& operator=(const MonitorInfo&) = default;
  explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_