/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor.h"

#include <vector>

#include "android-base/stringprintf.h"

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG.
#include "base/mutex.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction-inl.h"
#include "lock_word-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kDebugThresholdFudgeFactor = kIsDebugBuild ? 10 : 1;
static constexpr uint64_t kLongWaitMs = 100 * kDebugThresholdFudgeFactor;
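// With the fudge factor above, kLongWaitMs is 100 ms on release builds and 1000 ms on debug
// builds; Init() below applies the same scaling to the command-line-driven thresholds.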

/*
 * Every Object has a monitor associated with it, but not every Object is actually locked.  Even
 * the ones that are locked do not need a full-fledged monitor until a) there is actual contention
 * or b) wait() is called on the Object.
 *
 * For Android, we have implemented a scheme similar to the one described in Bacon et al.'s
 * "Thin locks: featherweight synchronization for Java" (ACM 1998).  Things are even easier for us,
 * though, because we have a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and "fat".  A lock may transition
 * from the "thin" state to the "fat" state and this transition is referred to as inflation. Once
 * a lock has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in mirror::Object::monitor_ and the representation is described
 * in the LockWord value type.
 *
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several threads waiting on it
 * (the wait call unlocks it).  One or more waiting threads may be getting interrupted or notified
 * at any given time.
 */
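
// As a purely illustrative sketch (not runtime code), the transitions map onto Java monitor
// usage roughly as follows:
//
//   synchronized (obj) {     // MonitorEnter: CAS a thin lock into obj's lock word.
//     synchronized (obj) {   //   Recursive acquire: bump the thin lock count.
//       obj.wait(100);       //   wait() needs a condition variable, so the lock is
//     }                      //   inflated to a fat Monitor and stays fat afterwards.
//   }                        // MonitorExit: thin or fat unlock, as appropriate.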

uint32_t Monitor::lock_profiling_threshold_ = 0;
uint32_t Monitor::stack_dump_lock_profiling_threshold_ = 0;

void Monitor::Init(uint32_t lock_profiling_threshold,
                   uint32_t stack_dump_lock_profiling_threshold) {
  // It isn't great to always include the debug build fudge factor for command-
  // line driven arguments, but it's easier to adjust here than in the build.
  lock_profiling_threshold_ =
      lock_profiling_threshold * kDebugThresholdFudgeFactor;
  stack_dump_lock_profiling_threshold_ =
      stack_dump_lock_profiling_threshold * kDebugThresholdFudgeFactor;
}

Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
    : monitor_lock_("a monitor lock", kMonitorLock),
      monitor_contenders_("monitor contenders", monitor_lock_),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      hash_code_(hash_code),
      locking_method_(nullptr),
      locking_dex_pc_(0),
      monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
  DCHECK(false) << "Should not be reached in 64b";
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the lifetime of the monitor.
}

Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
                 MonitorId id)
    : monitor_lock_("a monitor lock", kMonitorLock),
      monitor_contenders_("monitor contenders", monitor_lock_),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      hash_code_(hash_code),
      locking_method_(nullptr),
      locking_dex_pc_(0),
      monitor_id_(id) {
#ifdef __LP64__
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the lifetime of the monitor.
}

int32_t Monitor::GetHashCode() {
  while (!HasHashCode()) {
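    // A weak CAS may fail spuriously, so retry in a loop; either this thread or a racing
    // caller ends up installing the identity hash.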
    if (hash_code_.CompareAndSetWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
      break;
    }
  }
  DCHECK(HasHashCode());
  return hash_code_.LoadRelaxed();
}

bool Monitor::Install(Thread* self) {
  MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
  CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
  // Propagate the lock state.
  LockWord lw(GetObject()->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kThinLocked: {
      CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
      lock_count_ = lw.ThinLockCount();
      break;
    }
    case LockWord::kHashCode: {
      CHECK_EQ(hash_code_.LoadRelaxed(), static_cast<int32_t>(lw.GetHashCode()));
      break;
    }
    case LockWord::kFatLocked: {
      // The owner_ is suspended but another thread beat us to install a monitor.
      return false;
    }
    case LockWord::kUnlocked: {
      LOG(FATAL) << "Inflating unlocked lock word";
      break;
    }
    default: {
      LOG(FATAL) << "Invalid monitor state " << lw.GetState();
      return false;
    }
  }
  LockWord fat(this, lw.GCState());
  // Publish the updated lock word, which may race with other threads.
  bool success = GetObject()->CasLockWordWeakRelease(lw, fat);
  // Lock profiling.
  if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
    // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
    // abort.
    locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_, false);
    if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
      // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
      // enough that it's OK to walk the stack twice.
      struct NextMethodVisitor FINAL : public StackVisitor {
        explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
            : StackVisitor(thread,
                           nullptr,
                           StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                           false),
              count_(0),
              method_(nullptr),
              dex_pc_(0) {}
        bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
          ArtMethod* m = GetMethod();
          if (m->IsRuntimeMethod()) {
            // Continue if this is a runtime method.
            return true;
          }
          count_++;
          if (count_ == 2u) {
            method_ = m;
            dex_pc_ = GetDexPc(false);
            return false;
          }
          return true;
        }
        size_t count_;
        ArtMethod* method_;
        uint32_t dex_pc_;
      };
      NextMethodVisitor nmv(owner_);
      nmv.WalkStack();
      locking_method_ = nmv.method_;
      locking_dex_pc_ = nmv.dex_pc_;
    }
    DCHECK(locking_method_ == nullptr || !locking_method_->IsProxyMethod());
  }
  return success;
}

Monitor::~Monitor() {
  // Deflated monitors have a null object.
}

void Monitor::AppendToWaitSet(Thread* thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != nullptr);
  DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
  if (wait_set_ == nullptr) {
    wait_set_ = thread;
    return;
  }

  // push_back.
  Thread* t = wait_set_;
  while (t->GetWaitNext() != nullptr) {
    t = t->GetWaitNext();
  }
  t->SetWaitNext(thread);
}

void Monitor::RemoveFromWaitSet(Thread *thread) {
  DCHECK(owner_ == Thread::Current());
  DCHECK(thread != nullptr);
  if (wait_set_ == nullptr) {
    return;
  }
  if (wait_set_ == thread) {
    wait_set_ = thread->GetWaitNext();
    thread->SetWaitNext(nullptr);
    return;
  }

  Thread* t = wait_set_;
  while (t->GetWaitNext() != nullptr) {
    if (t->GetWaitNext() == thread) {
      t->SetWaitNext(thread->GetWaitNext());
      thread->SetWaitNext(nullptr);
      return;
    }
    t = t->GetWaitNext();
  }
}

void Monitor::SetObject(mirror::Object* object) {
  obj_ = GcRoot<mirror::Object>(object);
}

// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.

struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
  explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        method_(nullptr),
        dex_pc_(0),
        current_frame_number_(0),
        wanted_frame_number_(frame) {}
  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m == nullptr || m->IsRuntimeMethod()) {
      // Runtime method, upcall, or resolution issue. Skip.
      return true;
    }

    // Is this the requested frame?
    if (current_frame_number_ == wanted_frame_number_) {
      method_ = m;
      dex_pc_ = GetDexPc(false /* abort_on_error */);
      return false;
    }

    // Look for more.
    current_frame_number_++;
    return true;
  }

  ArtMethod* method_;
  uint32_t dex_pc_;

 private:
  size_t current_frame_number_;
  const size_t wanted_frame_number_;
};
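
// An illustrative use of the visitor above (a sketch, not called from here): grab the second
// non-runtime frame of the current thread.
//
//   NthCallerWithDexPcVisitor visitor(Thread::Current(), 1u);
//   visitor.WalkStack(false);
//   ArtMethod* m = visitor.method_;    // Remains null if the stack is too shallow.
//   uint32_t dex_pc = visitor.dex_pc_;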

// This function is inlined; it exists so that the VLOG and ATRACE checks do not have to be
// repeated at every potential tracing point.
void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) {
    AtraceMonitorLockImpl(self, obj, is_wait);
  }
}

void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_wait) {
  // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
  // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
  // stack walk than if !is_wait.
  NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
  visitor.WalkStack(false);
  const char* prefix = is_wait ? "Waiting on " : "Locking ";

  const char* filename;
  int32_t line_number;
  TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);

  // It would be nice to have a stable "ID" for the object here. However, the only stable thing
  // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
  // times when it is unsafe to make that call (see stack dumping for an explanation). More
  // importantly, we would have to give up on thin-locking when adding systrace locks, as the
  // identity hashcode is stored in the lockword normally (so can't be used with thin-locks).
  //
  // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids
  // also do not have to be stable, as the monitor may be deflated.
  std::string tmp = StringPrintf("%s %d at %s:%d",
      prefix,
      (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj))),
      (filename != nullptr ? filename : "null"),
      line_number);
  ATRACE_BEGIN(tmp.c_str());
}

void Monitor::AtraceMonitorUnlock() {
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
    ATRACE_END();
  }
}

std::string Monitor::PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters) {
  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  const char* owners_filename;
  int32_t owners_line_number = 0;
  if (owners_method != nullptr) {
    TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
  }
  std::ostringstream oss;
  oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")";
  if (owners_method != nullptr) {
    oss << " at " << owners_method->PrettyMethod();
    oss << "(" << owners_filename << ":" << owners_line_number << ")";
  }
  oss << " waiters=" << num_waiters;
  return oss.str();
}

bool Monitor::TryLockLocked(Thread* self) {
  if (owner_ == nullptr) {  // Unowned.
    owner_ = self;
    CHECK_EQ(lock_count_, 0);
    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (lock_profiling_threshold_ != 0) {
      locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
      // We don't expect a proxy method here.
      DCHECK(locking_method_ == nullptr || !locking_method_->IsProxyMethod());
    }
  } else if (owner_ == self) {  // Recursive.
    lock_count_++;
  } else {
    return false;
  }
  AtraceMonitorLock(self, GetObject(), false /* is_wait */);
  return true;
}

bool Monitor::TryLock(Thread* self) {
  MutexLock mu(self, monitor_lock_);
  return TryLockLocked(self);
}

// Asserts that a mutex isn't held when the class comes into and out of scope.
class ScopedAssertNotHeld {
 public:
  ScopedAssertNotHeld(Thread* self, Mutex& mu) : self_(self), mu_(mu) {
    mu_.AssertNotHeld(self_);
  }

  ~ScopedAssertNotHeld() {
    mu_.AssertNotHeld(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ScopedAssertNotHeld);
};

template <LockReason reason>
void Monitor::Lock(Thread* self) {
  ScopedAssertNotHeld sanh(self, monitor_lock_);
  bool called_monitors_callback = false;
  monitor_lock_.Lock(self);
  while (true) {
    if (TryLockLocked(self)) {
      break;
    }
    // Contended.
    const bool log_contention = (lock_profiling_threshold_ != 0);
    uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
    ArtMethod* owners_method = locking_method_;
    uint32_t owners_dex_pc = locking_dex_pc_;
    // Do this before releasing the lock so that we don't get deflated.
    size_t num_waiters = num_waiters_;
    ++num_waiters_;

    // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
    // lock and then re-acquiring the mutator lock can deadlock.
    bool started_trace = false;
    if (ATRACE_ENABLED()) {
      if (owner_ != nullptr) {  // Did the owner_ give the lock up?
        std::ostringstream oss;
        std::string name;
        owner_->GetThreadName(name);
        oss << PrettyContentionInfo(name,
                                    owner_->GetTid(),
                                    owners_method,
                                    owners_dex_pc,
                                    num_waiters);
        // Add info for contending thread.
        uint32_t pc;
        ArtMethod* m = self->GetCurrentMethod(&pc);
        const char* filename;
        int32_t line_number;
        TranslateLocation(m, pc, &filename, &line_number);
        oss << " blocking from "
            << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
            << ":" << line_number << ")";
        ATRACE_BEGIN(oss.str().c_str());
        started_trace = true;
      }
    }

    monitor_lock_.Unlock(self);  // Let go of locks in order.
    // Call the contended locking callback once and only once. Also only call it if we are locking
    // for the first time, not during a Wait wakeup.
    if (reason == LockReason::kForLock && !called_monitors_callback) {
      called_monitors_callback = true;
      Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocking(this);
    }
    self->SetMonitorEnterObject(GetObject());
    {
      ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
      uint32_t original_owner_thread_id = 0u;
      {
        // Reacquire monitor_lock_ without mutator_lock_ for Wait.
        MutexLock mu2(self, monitor_lock_);
        if (owner_ != nullptr) {  // Did the owner_ give the lock up?
          original_owner_thread_id = owner_->GetThreadId();
          monitor_contenders_.Wait(self);  // Still contended so wait.
        }
      }
      if (original_owner_thread_id != 0u) {
        // Woken from contention.
        if (log_contention) {
          uint64_t wait_ms = MilliTime() - wait_start_ms;
          uint32_t sample_percent;
          if (wait_ms >= lock_profiling_threshold_) {
            sample_percent = 100;
          } else {
            sample_percent = 100 * wait_ms / lock_profiling_threshold_;
          }
          if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
            // Reacquire mutator_lock_ for logging.
            ScopedObjectAccess soa(self);

            bool owner_alive = false;
            pid_t original_owner_tid = 0;
            std::string original_owner_name;

            const bool should_dump_stacks = stack_dump_lock_profiling_threshold_ > 0 &&
                wait_ms > stack_dump_lock_profiling_threshold_;
            std::string owner_stack_dump;

            // Acquire thread-list lock to find thread and keep it from dying until we've got all
            // the info we need.
            {
              Locks::thread_list_lock_->ExclusiveLock(Thread::Current());

              // Re-find the owner in case the thread got killed.
              Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
                  original_owner_thread_id);

              if (original_owner != nullptr) {
                owner_alive = true;
                original_owner_tid = original_owner->GetTid();
                original_owner->GetThreadName(original_owner_name);

                if (should_dump_stacks) {
                  // Very long contention. Dump stacks.
                  struct CollectStackTrace : public Closure {
                    void Run(art::Thread* thread) OVERRIDE
                        REQUIRES_SHARED(art::Locks::mutator_lock_) {
                      thread->DumpJavaStack(oss);
                    }

                    std::ostringstream oss;
                  };
                  CollectStackTrace owner_trace;
                  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
                  // execution.
                  original_owner->RequestSynchronousCheckpoint(&owner_trace);
                  owner_stack_dump = owner_trace.oss.str();
                } else {
                  Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
                }
              } else {
                Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
              }
              // This is all the data we need. Now drop the thread-list lock, it's OK for the
              // owner to go away now.
            }

            // If we found the owner (and thus have owner data), go and log now.
            if (owner_alive) {
              // Give the detailed traces for really long contention.
              if (should_dump_stacks) {
                // This must be here (and not above) because we cannot hold the thread-list lock
                // while running the checkpoint.
                std::ostringstream self_trace_oss;
                self->DumpJavaStack(self_trace_oss);

                uint32_t pc;
                ArtMethod* m = self->GetCurrentMethod(&pc);

                LOG(WARNING) << "Long "
                    << PrettyContentionInfo(original_owner_name,
                                            original_owner_tid,
                                            owners_method,
                                            owners_dex_pc,
                                            num_waiters)
                    << " in " << ArtMethod::PrettyMethod(m) << " for "
                    << PrettyDuration(MsToNs(wait_ms)) << "\n"
                    << "Current owner stack:\n" << owner_stack_dump
                    << "Contender stack:\n" << self_trace_oss.str();
              } else if (wait_ms > kLongWaitMs && owners_method != nullptr) {
                uint32_t pc;
                ArtMethod* m = self->GetCurrentMethod(&pc);
                // TODO: We should maybe check that original_owner is still a live thread.
                LOG(WARNING) << "Long "
                    << PrettyContentionInfo(original_owner_name,
                                            original_owner_tid,
                                            owners_method,
                                            owners_dex_pc,
                                            num_waiters)
                    << " in " << ArtMethod::PrettyMethod(m) << " for "
                    << PrettyDuration(MsToNs(wait_ms));
              }
              LogContentionEvent(self,
                                 wait_ms,
                                 sample_percent,
                                 owners_method,
                                 owners_dex_pc);
            }
          }
        }
      }
    }
    if (started_trace) {
      ATRACE_END();
    }
    self->SetMonitorEnterObject(nullptr);
    monitor_lock_.Lock(self);  // Reacquire locks in order.
    --num_waiters_;
  }
  monitor_lock_.Unlock(self);
  // We need to pair this with a single contended locking call. NB we match the RI behavior and call
  // this even if MonitorEnter failed.
  if (called_monitors_callback) {
    CHECK(reason == LockReason::kForLock);
    Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocked(this);
  }
}

template void Monitor::Lock<LockReason::kForLock>(Thread* self);
template void Monitor::Lock<LockReason::kForWait>(Thread* self);

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread* self = Thread::Current();
  self->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
    std::ostringstream ss;
    self->Dump(ss);
    LOG(Runtime::Current()->IsStarted() ? ::android::base::INFO : ::android::base::ERROR)
        << self->GetException()->Dump() << "\n" << ss.str();
  }
  va_end(args);
}

static std::string ThreadToString(Thread* thread) {
  if (thread == nullptr) {
    return "nullptr";
  }
  std::ostringstream oss;
  // TODO: alternatively, we could just return the thread's name.
  oss << *thread;
  return oss.str();
}

void Monitor::FailedUnlock(mirror::Object* o,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* monitor) {
  // Acquire thread list lock so threads won't disappear from under us.
  std::string current_owner_string;
  std::string expected_owner_string;
  std::string found_owner_string;
  uint32_t current_owner_thread_id = 0u;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    ThreadList* const thread_list = Runtime::Current()->GetThreadList();
    Thread* expected_owner = thread_list->FindThreadByThreadId(expected_owner_thread_id);
    Thread* found_owner = thread_list->FindThreadByThreadId(found_owner_thread_id);

    // Re-read owner now that we hold lock.
    Thread* current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
    if (current_owner != nullptr) {
      current_owner_thread_id = current_owner->GetThreadId();
    }
    // Get short descriptions of the threads involved.
    current_owner_string = ThreadToString(current_owner);
    expected_owner_string = expected_owner != nullptr ? ThreadToString(expected_owner) : "unnamed";
    found_owner_string = found_owner != nullptr ? ThreadToString(found_owner) : "unnamed";
  }

  if (current_owner_thread_id == 0u) {
    if (found_owner_thread_id == 0u) {
      ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
                                         " on thread '%s'",
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      // Race: the original read found an owner but now there is none
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (where now the monitor appears unowned) on thread '%s'",
                                         found_owner_string.c_str(),
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    }
  } else {
    if (found_owner_thread_id == 0u) {
      // Race: originally there was no owner, there is now
      ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                         " (originally believed to be unowned) on thread '%s'",
                                         current_owner_string.c_str(),
                                         mirror::Object::PrettyTypeOf(o).c_str(),
                                         expected_owner_string.c_str());
    } else {
      if (found_owner_thread_id != current_owner_thread_id) {
        // Race: originally found and current owner have changed
        ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
                                           " owned by '%s') on object of type '%s' on thread '%s'",
                                           found_owner_string.c_str(),
                                           current_owner_string.c_str(),
                                           mirror::Object::PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      } else {
        ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
                                           " on thread '%s'",
                                           current_owner_string.c_str(),
                                           mirror::Object::PrettyTypeOf(o).c_str(),
                                           expected_owner_string.c_str());
      }
    }
  }
}

bool Monitor::Unlock(Thread* self) {
  DCHECK(self != nullptr);
  uint32_t owner_thread_id = 0u;
  {
    MutexLock mu(self, monitor_lock_);
    Thread* owner = owner_;
    if (owner != nullptr) {
      owner_thread_id = owner->GetThreadId();
    }
    if (owner == self) {
      // We own the monitor, so nobody else can be in here.
      AtraceMonitorUnlock();
      if (lock_count_ == 0) {
        owner_ = nullptr;
        locking_method_ = nullptr;
        locking_dex_pc_ = 0;
        // Wake a contender.
        monitor_contenders_.Signal(self);
      } else {
        --lock_count_;
      }
      return true;
    }
  }
  // We don't own this, so we're not allowed to unlock it.
  // The JNI spec says that we should throw IllegalMonitorStateException in this case.
  FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
  return false;
}

void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);

  monitor_lock_.Lock(self);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    monitor_lock_.Unlock(self);
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // We need to turn a zero-length timed wait into a regular wait because
  // Object.wait(0, 0) is defined as Object.wait(0), which is defined as Object.wait().
  if (why == kTimedWaiting && (ms == 0 && ns == 0)) {
    why = kWaiting;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    monitor_lock_.Unlock(self);
    self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
                             "timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
    return;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not significant while we hold the lock.
   */
  AppendToWaitSet(self);
  ++num_waiters_;
  int prev_lock_count = lock_count_;
  lock_count_ = 0;
  owner_ = nullptr;
  ArtMethod* saved_method = locking_method_;
  locking_method_ = nullptr;
  uintptr_t saved_dex_pc = locking_dex_pc_;
  locking_dex_pc_ = 0;

  AtraceMonitorUnlock();  // For the implicit Unlock() just above. This will only end the deepest
                          // nesting, but that is enough for the visualization, and corresponds to
                          // the single Lock() we do afterwards.
  AtraceMonitorLock(self, GetObject(), true /* is_wait */);

  bool was_interrupted = false;
  bool timed_out = false;
  {
    // Update thread state. If the GC wakes up, it'll ignore us, knowing
    // that we won't touch any references in this state, and we'll check
    // our suspend mode before we transition out.
    ScopedThreadSuspension sts(self, why);

    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
    MutexLock mu(self, *self->GetWaitMutex());

    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
    // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
    // up.
    DCHECK(self->GetWaitMonitor() == nullptr);
    self->SetWaitMonitor(this);

    // Release the monitor lock.
    monitor_contenders_.Signal(self);
    monitor_lock_.Unlock(self);

    // Handle the case where the thread was interrupted before we called wait().
    if (self->IsInterrupted()) {
      was_interrupted = true;
    } else {
      // Wait for a notification or a timeout to occur.
      if (why == kWaiting) {
        self->GetWaitConditionVariable()->Wait(self);
      } else {
        DCHECK(why == kTimedWaiting || why == kSleeping) << why;
        timed_out = self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
      }
      was_interrupted = self->IsInterrupted();
    }
  }

  {
    // We reset the thread's wait_monitor_ field after transitioning back to runnable so
    // that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
    // and diagnostic purposes. (If you reset this earlier, stack dumps will claim that threads
    // are waiting on "null".)
    MutexLock mu(self, *self->GetWaitMutex());
    DCHECK(self->GetWaitMonitor() != nullptr);
    self->SetWaitMonitor(nullptr);
  }

  // Allocate the interrupted exception not holding the monitor lock since it may cause a GC.
  // If the GC requires acquiring the monitor for enqueuing cleared references, this would
  // cause a deadlock if the monitor is held.
  if (was_interrupted && interruptShouldThrow) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    self->SetInterrupted(false);
    self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
  }

  AtraceMonitorUnlock();  // End Wait().

  // We just slept, tell the runtime callbacks about this.
  Runtime::Current()->GetRuntimeCallbacks()->MonitorWaitFinished(this, timed_out);

  // Re-acquire the monitor and lock.
  Lock<LockReason::kForWait>(self);
  monitor_lock_.Lock(self);
  self->GetWaitMutex()->AssertNotHeld(self);

  /*
   * We remove our thread from the wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not significant while we hold the lock.
   */
  owner_ = self;
  lock_count_ = prev_lock_count;
  locking_method_ = saved_method;
  locking_dex_pc_ = saved_dex_pc;
  --num_waiters_;
  RemoveFromWaitSet(self);

  monitor_lock_.Unlock(self);
}

void Monitor::Notify(Thread* self) {
  DCHECK(self != nullptr);
  MutexLock mu(self, monitor_lock_);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set.
  while (wait_set_ != nullptr) {
    Thread* thread = wait_set_;
    wait_set_ = thread->GetWaitNext();
    thread->SetWaitNext(nullptr);

    // Check to see if the thread is still waiting.
    MutexLock wait_mu(self, *thread->GetWaitMutex());
    if (thread->GetWaitMonitor() != nullptr) {
      thread->GetWaitConditionVariable()->Signal(self);
      return;
    }
  }
}

void Monitor::NotifyAll(Thread* self) {
  DCHECK(self != nullptr);
  MutexLock mu(self, monitor_lock_);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    return;
  }
  // Signal all threads in the wait set.
  while (wait_set_ != nullptr) {
    Thread* thread = wait_set_;
    wait_set_ = thread->GetWaitNext();
    thread->SetWaitNext(nullptr);
    thread->Notify();
  }
}

bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
  DCHECK(obj != nullptr);
  // Don't need volatile since we only deflate with mutators suspended.
  LockWord lw(obj->GetLockWord(false));
  // If the lock isn't an inflated monitor, then we don't need to deflate anything.
  if (lw.GetState() == LockWord::kFatLocked) {
    Monitor* monitor = lw.FatLockMonitor();
    DCHECK(monitor != nullptr);
    MutexLock mu(self, monitor->monitor_lock_);
    // Can't deflate if we have anybody waiting on the CV.
    if (monitor->num_waiters_ > 0) {
      return false;
    }
    Thread* owner = monitor->owner_;
    if (owner != nullptr) {
      // Can't deflate if we are locked and have a hash code.
      if (monitor->HasHashCode()) {
        return false;
      }
      // Can't deflate if our lock count is too high.
      if (static_cast<uint32_t>(monitor->lock_count_) > LockWord::kThinLockMaxCount) {
        return false;
      }
      // Deflate to a thin lock.
      LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(),
                                                 monitor->lock_count_,
                                                 lw.GCState());
      // Assume no concurrent read barrier state changes as mutators are suspended.
      obj->SetLockWord(new_lw, false);
      VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
          << monitor->lock_count_;
    } else if (monitor->HasHashCode()) {
      LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
      // Assume no concurrent read barrier state changes as mutators are suspended.
      obj->SetLockWord(new_lw, false);
      VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
    } else {
      // No lock and no hash, just put an empty lock word inside the object.
      LockWord new_lw = LockWord::FromDefault(lw.GCState());
      // Assume no concurrent read barrier state changes as mutators are suspended.
      obj->SetLockWord(new_lw, false);
      VLOG(monitor) << "Deflated " << obj << " to empty lock word";
    }
    // The monitor is deflated, mark the object as null so that we know to delete it during the
    // next GC.
    monitor->obj_ = GcRoot<mirror::Object>(nullptr);
  }
  return true;
}

void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  // Allocate and acquire a new monitor.
  Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
  DCHECK(m != nullptr);
  if (m->Install(self)) {
    if (owner != nullptr) {
      VLOG(monitor) << "monitor: thread " << owner->GetThreadId()
          << " created monitor " << m << " for object " << obj;
    } else {
      VLOG(monitor) << "monitor: Inflate with hashcode " << hash_code
          << " created monitor " << m << " for object " << obj;
    }
    Runtime::Current()->GetMonitorList()->Add(m);
    CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
  } else {
    MonitorPool::ReleaseMonitor(self, m);
  }
}

void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) {
  DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
  uint32_t owner_thread_id = lock_word.ThinLockOwner();
  if (owner_thread_id == self->GetThreadId()) {
    // We own the monitor, we can easily inflate it.
    Inflate(self, self, obj.Get(), hash_code);
  } else {
    ThreadList* thread_list = Runtime::Current()->GetThreadList();
    // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
    self->SetMonitorEnterObject(obj.Get());
    bool timed_out;
    Thread* owner;
    {
      ScopedThreadSuspension sts(self, kWaitingForLockInflation);
      owner = thread_list->SuspendThreadByThreadId(owner_thread_id,
                                                   SuspendReason::kInternal,
                                                   &timed_out);
    }
    if (owner != nullptr) {
      // We succeeded in suspending the thread, check the lock's status didn't change.
      lock_word = obj->GetLockWord(true);
      if (lock_word.GetState() == LockWord::kThinLocked &&
          lock_word.ThinLockOwner() == owner_thread_id) {
        // Go ahead and inflate the lock.
        Inflate(self, owner, obj.Get(), hash_code);
      }
      bool resumed = thread_list->Resume(owner, SuspendReason::kInternal);
      DCHECK(resumed);
    }
    self->SetMonitorEnterObject(nullptr);
  }
}

// Fool annotalysis into thinking that the lock on obj is acquired.
static mirror::Object* FakeLock(mirror::Object* obj)
    EXCLUSIVE_LOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}

// Fool annotalysis into thinking that the lock on obj is released.
static mirror::Object* FakeUnlock(mirror::Object* obj)
    UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}
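
// An illustrative pairing (a sketch mirroring the calls in MonitorEnter/MonitorExit below):
//
//   obj = FakeLock(obj);    // Annotalysis now believes the lock on obj is held.
//   ...                     // The real lock word manipulation happens in between.
//   obj = FakeUnlock(obj);  // Annotalysis now believes the lock was released.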

mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool trylock) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  self->AssertThreadSuspensionIsAllowable();
  obj = FakeLock(obj);
  uint32_t thread_id = self->GetThreadId();
  size_t contention_count = 0;
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));
  while (true) {
    // We initially read the lockword with ordinary Java/relaxed semantics. When stronger
    // semantics are needed, we address it below. Since GetLockWord bottoms out to a relaxed load,
    // we can fix it later, in an infrequently executed case, with a fence.
    LockWord lock_word = h_obj->GetLockWord(false);
    switch (lock_word.GetState()) {
      case LockWord::kUnlocked: {
        // No ordering required for preceding lockword read, since we retest.
        LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
        if (h_obj->CasLockWordWeakAcquire(lock_word, thin_locked)) {
          AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
          return h_obj.Get();  // Success!
        }
        continue;  // Go again.
      }
      case LockWord::kThinLocked: {
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id == thread_id) {
          // No ordering required for initial lockword read.
          // We own the lock, increase the recursion count.
          uint32_t new_count = lock_word.ThinLockCount() + 1;
          if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
            LockWord thin_locked(LockWord::FromThinLockId(thread_id,
                                                          new_count,
                                                          lock_word.GCState()));
            // Only this thread pays attention to the count. Thus there is no need for stronger
            // than relaxed memory ordering.
            if (!kUseReadBarrier) {
              h_obj->SetLockWord(thin_locked, false /* volatile */);
              AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
              return h_obj.Get();  // Success!
            } else {
              // Use CAS to preserve the read barrier state.
              if (h_obj->CasLockWordWeakRelaxed(lock_word, thin_locked)) {
                AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
                return h_obj.Get();  // Success!
              }
            }
            continue;  // Go again.
          } else {
            // We'd overflow the recursion count, so inflate the monitor.
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
        } else {
          if (trylock) {
            return nullptr;
          }
          // Contention.
          contention_count++;
          Runtime* runtime = Runtime::Current();
          if (contention_count <= runtime->GetMaxSpinsBeforeThinLockInflation()) {
            // TODO: Consider switching the thread state to kWaitingForLockInflation when we are
            // yielding.  Use sched_yield instead of NanoSleep since NanoSleep can wait much longer
            // than the parameter you pass in. This can cause thread suspension to take excessively
            // long and make long pauses. See b/16307460.
            // TODO: We should literally spin first, without sched_yield. Sched_yield either does
            // nothing (at significant expense), or guarantees that we wait at least microseconds.
            // If the owner is running, I would expect the median lock hold time to be hundreds
            // of nanoseconds or less.
            sched_yield();
          } else {
            contention_count = 0;
            // No ordering required for initial lockword read. Install rereads it anyway.
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
        }
        continue;  // Start from the beginning.
      }
      case LockWord::kFatLocked: {
        // We should have done an acquire read of the lockword initially, to ensure
        // visibility of the monitor data structure. Use an explicit fence instead.
        QuasiAtomic::ThreadFenceAcquire();
        Monitor* mon = lock_word.FatLockMonitor();
        if (trylock) {
          return mon->TryLock(self) ? h_obj.Get() : nullptr;
        } else {
          mon->Lock(self);
          return h_obj.Get();  // Success!
        }
      }
      case LockWord::kHashCode:
        // Inflate with the existing hashcode.
        // Again no ordering required for initial lockword read, since we don't rely
        // on the visibility of any prior computation.
        Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
        continue;  // Start from the beginning.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        UNREACHABLE();
      }
    }
  }
}

bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  self->AssertThreadSuspensionIsAllowable();
  obj = FakeUnlock(obj);
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));
  while (true) {
    LockWord lock_word = obj->GetLockWord(true);
    switch (lock_word.GetState()) {
      case LockWord::kHashCode:
        // Fall-through.
      case LockWord::kUnlocked:
        FailedUnlock(h_obj.Get(), self->GetThreadId(), 0u, nullptr);
        return false;  // Failure.
      case LockWord::kThinLocked: {
        uint32_t thread_id = self->GetThreadId();
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id != thread_id) {
          FailedUnlock(h_obj.Get(), thread_id, owner_thread_id, nullptr);
          return false;  // Failure.
        } else {
          // We own the lock, decrease the recursion count.
          LockWord new_lw = LockWord::Default();
          if (lock_word.ThinLockCount() != 0) {
            uint32_t new_count = lock_word.ThinLockCount() - 1;
            new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.GCState());
          } else {
            new_lw = LockWord::FromDefault(lock_word.GCState());
          }
          if (!kUseReadBarrier) {
            DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
            // TODO: This really only needs memory_order_release, but we currently have
            // no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
            // with a final argument of true. This slows down x86 and ARMv7, but probably not ARMv8.
            h_obj->SetLockWord(new_lw, true);
            AtraceMonitorUnlock();
            // Success!
            return true;
          } else {
            // Use CAS to preserve the read barrier state.
            if (h_obj->CasLockWordWeakRelease(lock_word, new_lw)) {
              AtraceMonitorUnlock();
              // Success!
              return true;
            }
          }
          continue;  // Go again.
        }
      }
      case LockWord::kFatLocked: {
        Monitor* mon = lock_word.FatLockMonitor();
        return mon->Unlock(self);
      }
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        return false;
      }
    }
  }
}

void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));

  Runtime::Current()->GetRuntimeCallbacks()->ObjectWaitStart(h_obj, ms);
  if (UNLIKELY(self->ObserveAsyncException() || self->IsExceptionPending())) {
    // See b/65558434 for information on handling of exceptions here.
    return;
  }

  LockWord lock_word = h_obj->GetLockWord(true);
  while (lock_word.GetState() != LockWord::kFatLocked) {
    switch (lock_word.GetState()) {
      case LockWord::kHashCode:
        // Fall-through.
      case LockWord::kUnlocked:
        ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
        return;  // Failure.
      case LockWord::kThinLocked: {
        uint32_t thread_id = self->GetThreadId();
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id != thread_id) {
          ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
          return;  // Failure.
        } else {
          // We own the lock, inflate to enqueue ourselves on the Monitor. Inflation may fail
          // spuriously, so re-load the lock word.
          Inflate(self, self, h_obj.Get(), 0);
          lock_word = h_obj->GetLockWord(true);
        }
        break;
      }
      case LockWord::kFatLocked:  // Unreachable given the loop condition above. Fall-through.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        return;
      }
    }
  }
  Monitor* mon = lock_word.FatLockMonitor();
  mon->Wait(self, ms, ns, interruptShouldThrow, why);
}
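
// An illustrative Java-level example of the inflation performed above:
//
//   synchronized (obj) {
//     obj.wait();  // Enters the runtime's wait path while obj is thin-locked.
//   }
//
// A thin lock word has no room to queue waiters, so the kThinLocked case
// inflates the lock to a full Monitor and re-reads the lock word; the loop
// only falls through to Monitor::Wait() once the lock is fat.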

void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kHashCode:
      // Fall-through.
    case LockWord::kUnlocked:
      ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
      return;  // Failure.
    case LockWord::kThinLocked: {
      uint32_t thread_id = self->GetThreadId();
      uint32_t owner_thread_id = lock_word.ThinLockOwner();
      if (owner_thread_id != thread_id) {
        ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
        return;  // Failure.
      } else {
        // We own the lock but there's no Monitor and therefore no waiters.
        return;  // Success.
      }
    }
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      if (notify_all) {
        mon->NotifyAll(self);
      } else {
        mon->Notify(self);
      }
      return;  // Success.
    }
    default: {
      LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
      return;
    }
  }
}
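
// Note that the kThinLocked case above succeeds without doing any work: a
// thin lock can never have waiters, since any wait() on the object would
// already have inflated the lock to a Monitor (see Monitor::Wait() above).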

uint32_t Monitor::GetLockOwnerThreadId(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kHashCode:
      // Fall-through.
    case LockWord::kUnlocked:
      return ThreadList::kInvalidThreadId;
    case LockWord::kThinLocked:
      return lock_word.ThinLockOwner();
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      return mon->GetOwnerThreadId();
    }
    default: {
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
    }
  }
}

ThreadState Monitor::FetchState(const Thread* thread,
                                /* out */ mirror::Object** monitor_object,
                                /* out */ uint32_t* lock_owner_tid) {
  DCHECK(monitor_object != nullptr);
  DCHECK(lock_owner_tid != nullptr);

  *monitor_object = nullptr;
  *lock_owner_tid = ThreadList::kInvalidThreadId;

  ThreadState state = thread->GetState();

  switch (state) {
    case kWaiting:
    case kTimedWaiting:
    case kSleeping:
    {
      Thread* self = Thread::Current();
      MutexLock mu(self, *thread->GetWaitMutex());
      Monitor* monitor = thread->GetWaitMonitor();
      if (monitor != nullptr) {
        *monitor_object = monitor->GetObject();
      }
    }
    break;

    case kBlocked:
    case kWaitingForLockInflation:
    {
      mirror::Object* lock_object = thread->GetMonitorEnterObject();
      if (lock_object != nullptr) {
        if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
          // We may call Thread::Dump() in the middle of the CC thread flip, and this thread's
          // stack may not have been flipped yet, so "lock_object" may be a from-space (stale)
          // ref. In that case the GetLockOwnerThreadId() call below would crash, so explicitly
          // mark/forward it here.
          lock_object = ReadBarrier::Mark(lock_object);
        }
        *monitor_object = lock_object;
        *lock_owner_tid = lock_object->GetLockOwnerThreadId();
      }
    }
    break;

    default:
      break;
  }

  return state;
}
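
// In summary, FetchState() reports:
//   kWaiting / kTimedWaiting / kSleeping  -> the object of the monitor the
//                                            thread is waiting on, if any;
//   kBlocked / kWaitingForLockInflation   -> the object whose lock the thread
//                                            is trying to enter, plus the
//                                            owner's thread id;
//   any other state                       -> a null object and
//                                            ThreadList::kInvalidThreadId.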

mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
  // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
  // definition of contended that includes a monitor a thread is trying to enter...
  mirror::Object* result = thread->GetMonitorEnterObject();
  if (result == nullptr) {
    // ...but also a monitor that the thread is waiting on.
    MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
    Monitor* monitor = thread->GetWaitMonitor();
    if (monitor != nullptr) {
      result = monitor->GetObject();
    }
  }
  return result;
}

void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context, bool abort_on_failure) {
  ArtMethod* m = stack_visitor->GetMethod();
  CHECK(m != nullptr);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      mirror::Object* jni_this =
          stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
      callback(jni_this, callback_context);
    }
    return;
  }

  // Proxy methods should not be synchronized.
  if (m->IsProxyMethod()) {
    CHECK(!m->IsSynchronized());
    return;
  }

  // Is there any reason to believe there's any synchronization in this method?
  CHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (accessor.TriesSize() == 0) {
    return;  // No "tries" implies no synchronization, so no held locks to report.
  }
  // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort if it cannot find the
  // dex pc; it will instead return kDexNoIndex. In that case bail out, as it indicates we have an
  // inconsistent stack anyway.
  uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
  if (!abort_on_failure && dex_pc == dex::kDexNoIndex) {
    LOG(ERROR) << "Could not find dex_pc for " << m->PrettyMethod();
    return;
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<verifier::MethodVerifier::DexLockInfo> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
  for (verifier::MethodVerifier::DexLockInfo& dex_lock_info : monitor_enter_dex_pcs) {
    // As a debug check, verify that the dex PC corresponds to a monitor-enter.
    if (kIsDebugBuild) {
      const Instruction& monitor_enter_instruction = accessor.InstructionAt(dex_lock_info.dex_pc);
      CHECK_EQ(monitor_enter_instruction.Opcode(), Instruction::MONITOR_ENTER)
          << "expected monitor-enter @" << dex_lock_info.dex_pc << "; was "
          << reinterpret_cast<const void*>(&monitor_enter_instruction);
    }
    // Iterate through the set of dex registers, as the compiler may not have kept all of them
    // live.
    bool success = false;
    for (uint32_t dex_reg : dex_lock_info.dex_registers) {
      uint32_t value;
      success = stack_visitor->GetVReg(m, dex_reg, kReferenceVReg, &value);
      if (success) {
        mirror::Object* o = reinterpret_cast<mirror::Object*>(value);
        callback(o, callback_context);
        break;
      }
    }
    DCHECK(success) << "Failed to find/read reference for monitor-enter at dex pc "
                    << dex_lock_info.dex_pc
                    << " in method "
                    << m->PrettyMethod();
    if (!success) {
      LOG(WARNING) << "Had a lock reported for dex pc " << dex_lock_info.dex_pc
                   << " but was not able to fetch a corresponding object!";
    }
  }
}
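
// An illustrative use of VisitLocks() from inside a stack walk, assuming a
// hypothetical DumpLockedObject callback (not part of this file):
//
//   static void DumpLockedObject(mirror::Object* o, void* context) {
//     std::ostream& os = *reinterpret_cast<std::ostream*>(context);
//     os << "  - locked " << o << "\n";
//   }
//
//   // In a StackVisitor::VisitFrame() implementation, with os the dump target:
//   Monitor::VisitLocks(this, DumpLockedObject, &os, /* abort_on_failure */ false);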

bool Monitor::IsValidLockWord(LockWord lock_word) {
  switch (lock_word.GetState()) {
    case LockWord::kUnlocked:
      // Nothing to check.
      return true;
    case LockWord::kThinLocked:
      // Basic sanity check of the owner.
      return lock_word.ThinLockOwner() != ThreadList::kInvalidThreadId;
    case LockWord::kFatLocked: {
      // Check that the monitor appears in the monitor list.
      Monitor* mon = lock_word.FatLockMonitor();
      MonitorList* list = Runtime::Current()->GetMonitorList();
      MutexLock mu(Thread::Current(), list->monitor_list_lock_);
      for (Monitor* list_mon : list->list_) {
        if (mon == list_mon) {
          return true;  // Found our monitor.
        }
      }
      return false;  // Fail - the object references a monitor the runtime does not know about.
    }
    case LockWord::kHashCode:
      return true;
    default:
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
  }
}

bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
  MutexLock mu(Thread::Current(), monitor_lock_);
  return owner_ != nullptr;
}

void Monitor::TranslateLocation(ArtMethod* method,
                                uint32_t dex_pc,
                                const char** source_file,
                                int32_t* line_number) {
  // If method is null, the location is unknown.
  if (method == nullptr) {
    *source_file = "";
    *line_number = 0;
    return;
  }
  *source_file = method->GetDeclaringClassSourceFile();
  if (*source_file == nullptr) {
    *source_file = "";
  }
  *line_number = method->GetLineNumFromDexPC(dex_pc);
}

uint32_t Monitor::GetOwnerThreadId() {
  MutexLock mu(Thread::Current(), monitor_lock_);
  Thread* owner = owner_;
  if (owner != nullptr) {
    return owner->GetThreadId();
  } else {
    return ThreadList::kInvalidThreadId;
  }
}

MonitorList::MonitorList()
    : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock", kMonitorListLock),
      monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
}

MonitorList::~MonitorList() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // Release all monitors to the pool.
  // TODO: Is it an invariant that *all* open monitors are in the list? If so, we could clear the
  // pool faster.
  MonitorPool::ReleaseMonitors(self, &list_);
}

void MonitorList::DisallowNewMonitors() {
  CHECK(!kUseReadBarrier);
  MutexLock mu(Thread::Current(), monitor_list_lock_);
  allow_new_monitors_ = false;
}

void MonitorList::AllowNewMonitors() {
  CHECK(!kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  allow_new_monitors_ = true;
  monitor_add_condition_.Broadcast(self);
}

void MonitorList::BroadcastForNewMonitors() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  monitor_add_condition_.Broadcast(self);
}

void MonitorList::Add(Monitor* m) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // CMS needs this to block for concurrent reference processing because an object allocated during
  // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
  // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
  while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
    monitor_add_condition_.WaitHoldingLocks(self);
  }
  list_.push_front(m);
}
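
// An illustrative interleaving of the blocking above with CMS reference
// processing (with kUseReadBarrier the wait loop is never entered):
//
//   GC thread                         Mutator thread
//   ---------                         --------------
//   DisallowNewMonitors()
//                                     MonitorList::Add(m)
//                                       -> blocks on monitor_add_condition_
//   ... process references ...
//   AllowNewMonitors()
//                                       -> wakes and pushes m onto list_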

void MonitorList::SweepMonitorList(IsMarkedVisitor* visitor) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  for (auto it = list_.begin(); it != list_.end(); ) {
    Monitor* m = *it;
    // Disable the read barrier in GetObject() as this is called by GC.
    mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
    // The object of a monitor can be null if we have deflated it.
    mirror::Object* new_obj = obj != nullptr ? visitor->IsMarked(obj) : nullptr;
    if (new_obj == nullptr) {
      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
                    << obj;
      MonitorPool::ReleaseMonitor(self, m);
      it = list_.erase(it);
    } else {
      m->SetObject(new_obj);
      ++it;
    }
  }
}
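
// During the sweep above, each monitor in the list meets one of three fates:
//   - its object was already deflated (obj == nullptr): the monitor is freed;
//   - its object is unmarked garbage (IsMarked() returns null): the monitor is
//     freed along with it;
//   - its object survived and possibly moved: the monitor is re-pointed at the
//     new location via SetObject().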

size_t MonitorList::Size() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  return list_.size();
}

class MonitorDeflateVisitor : public IsMarkedVisitor {
 public:
  MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}

  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (Monitor::Deflate(self_, object)) {
      DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
      ++deflate_count_;
      // If we deflated, return null so that the monitor gets removed from the list.
      return nullptr;
    }
    return object;  // Monitor was not deflated.
  }

  Thread* const self_;
  size_t deflate_count_;
};

size_t MonitorList::DeflateMonitors() {
  MonitorDeflateVisitor visitor;
  Locks::mutator_lock_->AssertExclusiveHeld(visitor.self_);
  SweepMonitorList(&visitor);
  return visitor.deflate_count_;
}
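
// Note that DeflateMonitors() reuses the sweeping walk above: the visitor
// returns null for each monitor it deflates, so SweepMonitorList() releases
// those monitors and prunes them from list_ in a single pass. Holding the
// mutator lock exclusively means no other thread can be using the monitors
// while this happens.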

MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kUnlocked:
      // Fall-through.
    case LockWord::kForwardingAddress:
      // Fall-through.
    case LockWord::kHashCode:
      break;
    case LockWord::kThinLocked:
      owner_ = Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
      DCHECK(owner_ != nullptr) << "Thin-locked without owner!";
      entry_count_ = 1 + lock_word.ThinLockCount();
      // Thin locks have no waiters.
      break;
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      owner_ = mon->owner_;
      // Here it is okay for the owner to be null since we don't reset the LockWord back to
      // kUnlocked until we get a GC. In cases where this hasn't happened yet we will have a fat
      // lock without an owner.
      if (owner_ != nullptr) {
        entry_count_ = 1 + mon->lock_count_;
      } else {
        DCHECK_EQ(mon->lock_count_, 0) << "Monitor is fat-locked without any owner!";
      }
      for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
        waiters_.push_back(waiter);
      }
      break;
    }
  }
}
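
// An illustrative reading of the counts above: for an object that thread T has
// locked three times through nested synchronized blocks, MonitorInfo reports
// owner_ == T and entry_count_ == 3 (1 + a ThinLockCount() or lock_count_ of
// 2); waiters_ can only be non-empty in the fat-locked case.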

}  // namespace art