Home | History | Annotate | Download | only in runtime
      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "monitor.h"
     18 
     19 #include <vector>
     20 
     21 #include "android-base/stringprintf.h"
     22 
     23 #include "art_method-inl.h"
     24 #include "base/mutex.h"
     25 #include "base/stl_util.h"
     26 #include "base/systrace.h"
     27 #include "base/time_utils.h"
     28 #include "class_linker.h"
     29 #include "dex_file-inl.h"
     30 #include "dex_instruction-inl.h"
     31 #include "lock_word-inl.h"
     32 #include "mirror/class-inl.h"
     33 #include "mirror/object-inl.h"
     34 #include "scoped_thread_state_change-inl.h"
     35 #include "thread.h"
     36 #include "thread_list.h"
     37 #include "verifier/method_verifier.h"
     38 #include "well_known_classes.h"
     39 
     40 namespace art {
     41 
     42 using android::base::StringPrintf;
     43 
     44 static constexpr uint64_t kLongWaitMs = 100;
     45 
     46 /*
     47  * Every Object has a monitor associated with it, but not every Object is actually locked.  Even
     48  * the ones that are locked do not need a full-fledged monitor until a) there is actual contention
     49  * or b) wait() is called on the Object.
     50  *
     51  * For Android, we have implemented a scheme similar to the one described in Bacon et al.'s
     52  * "Thin locks: featherweight synchronization for Java" (ACM 1998).  Things are even easier for us,
     53  * though, because we have a full 32 bits to work with.
     54  *
     55  * The two states of an Object's lock are referred to as "thin" and "fat".  A lock may transition
     56  * from the "thin" state to the "fat" state and this transition is referred to as inflation. Once
     57  * a lock has been inflated it remains in the "fat" state indefinitely.
     58  *
     59  * The lock value itself is stored in mirror::Object::monitor_ and the representation is described
     60  * in the LockWord value type.
     61  *
     62  * Monitors provide:
     63  *  - mutually exclusive access to resources
     64  *  - a way for multiple threads to wait for notification
     65  *
     66  * In effect, they fill the role of both mutexes and condition variables.
     67  *
     68  * Only one thread can own the monitor at any time.  There may be several threads waiting on it
     69  * (the wait call unlocks it).  One or more waiting threads may be getting interrupted or notified
     70  * at any given time.
     71  */
     72 
     73 uint32_t Monitor::lock_profiling_threshold_ = 0;
     74 
// Records the global contention-logging threshold (milliseconds). A value of 0 disables
// lock profiling; see the uses of lock_profiling_threshold_ in TryLockLocked() and Lock().
void Monitor::Init(uint32_t lock_profiling_threshold) {
  lock_profiling_threshold_ = lock_profiling_threshold;
}
     78 
// Constructs a monitor for |obj| as part of inflation. |owner| is the thread currently
// holding the thin lock (may be null when inflating for a hash code only); |hash_code| is
// the identity hash to carry for the monitor's lifetime. The monitor id is computed from
// the monitor's address. Note the DCHECK below: this overload must not be used on 64-bit
// builds (there monitors live in MonitorPool chunks and use the id-taking overload).
Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
    : monitor_lock_("a monitor lock", kMonitorLock),
      monitor_contenders_("monitor contenders", monitor_lock_),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      hash_code_(hash_code),
      locking_method_(nullptr),
      locking_dex_pc_(0),
      monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
  DCHECK(false) << "Should not be reached in 64b";
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the life time of the monitor.
}
    100 
// Same as the constructor above, but with an explicitly supplied MonitorId |id| (used when
// monitors are allocated from a MonitorPool, where ids are assigned by the pool). On 64-bit
// builds the intrusive free-list link is cleared here.
Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
                 MonitorId id)
    : monitor_lock_("a monitor lock", kMonitorLock),
      monitor_contenders_("monitor contenders", monitor_lock_),
      num_waiters_(0),
      owner_(owner),
      lock_count_(0),
      obj_(GcRoot<mirror::Object>(obj)),
      wait_set_(nullptr),
      hash_code_(hash_code),
      locking_method_(nullptr),
      locking_dex_pc_(0),
      monitor_id_(id) {
#ifdef __LP64__
  next_free_ = nullptr;
#endif
  // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
  // with the owner unlocking the thin-lock.
  CHECK(owner == nullptr || owner == self || owner->IsSuspended());
  // The identity hash code is set for the life time of the monitor.
}
    122 
// Returns the identity hash code of the associated object, generating and installing one on
// first use. A stored value of 0 means "no hash yet"; the CAS from 0 guarantees exactly one
// generated value ever wins. The weak CAS may fail spuriously, hence the retry loop.
int32_t Monitor::GetHashCode() {
  while (!HasHashCode()) {
    if (hash_code_.CompareExchangeWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
      break;
    }
  }
  DCHECK(HasHashCode());
  return hash_code_.LoadRelaxed();
}
    132 
// Publishes this monitor into the object's lock word, completing inflation. The thin-lock
// owner (if any) is either ourselves or suspended (checked below), so the lock word can only
// change under us via a racing inflation by another thread. Returns false if another thread
// won that race (lock word already fat); the caller is then expected to discard this monitor.
bool Monitor::Install(Thread* self) {
  MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
  CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
  // Propagate the lock state.
  LockWord lw(GetObject()->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kThinLocked: {
      // Carry the recursion count over from the thin lock.
      CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
      lock_count_ = lw.ThinLockCount();
      break;
    }
    case LockWord::kHashCode: {
      // The hash stored in the lock word must match what the constructor recorded.
      CHECK_EQ(hash_code_.LoadRelaxed(), static_cast<int32_t>(lw.GetHashCode()));
      break;
    }
    case LockWord::kFatLocked: {
      // The owner_ is suspended but another thread beat us to install a monitor.
      return false;
    }
    case LockWord::kUnlocked: {
      LOG(FATAL) << "Inflating unlocked lock word";
      break;
    }
    default: {
      LOG(FATAL) << "Invalid monitor state " << lw.GetState();
      return false;
    }
  }
  LockWord fat(this, lw.GCState());
  // Publish the updated lock word, which may race with other threads.
  bool success = GetObject()->CasLockWordWeakRelease(lw, fat);
  // Lock profiling.
  if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
    // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
    // abort.
    locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_, false);
  }
  return success;
}
    172 
// Nothing to release explicitly; members (mutex, condition variable) clean up via their own
// destructors.
Monitor::~Monitor() {
  // Deflated monitors have a null object.
}
    176 
    177 void Monitor::AppendToWaitSet(Thread* thread) {
    178   DCHECK(owner_ == Thread::Current());
    179   DCHECK(thread != nullptr);
    180   DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
    181   if (wait_set_ == nullptr) {
    182     wait_set_ = thread;
    183     return;
    184   }
    185 
    186   // push_back.
    187   Thread* t = wait_set_;
    188   while (t->GetWaitNext() != nullptr) {
    189     t = t->GetWaitNext();
    190   }
    191   t->SetWaitNext(thread);
    192 }
    193 
    194 void Monitor::RemoveFromWaitSet(Thread *thread) {
    195   DCHECK(owner_ == Thread::Current());
    196   DCHECK(thread != nullptr);
    197   if (wait_set_ == nullptr) {
    198     return;
    199   }
    200   if (wait_set_ == thread) {
    201     wait_set_ = thread->GetWaitNext();
    202     thread->SetWaitNext(nullptr);
    203     return;
    204   }
    205 
    206   Thread* t = wait_set_;
    207   while (t->GetWaitNext() != nullptr) {
    208     if (t->GetWaitNext() == thread) {
    209       t->SetWaitNext(thread->GetWaitNext());
    210       thread->SetWaitNext(nullptr);
    211       return;
    212     }
    213     t = t->GetWaitNext();
    214   }
    215 }
    216 
// Repoints this monitor at |object|. NOTE(review): presumably used when the GC moves or
// replaces the underlying object — confirm against callers elsewhere in the runtime.
void Monitor::SetObject(mirror::Object* object) {
  obj_ = GcRoot<mirror::Object>(object);
}
    220 
    221 // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
    222 
// Stack visitor that records the ArtMethod* and dex pc of the |frame|-th managed frame,
// skipping runtime methods. After WalkStack(), method_ is null (and dex_pc_ 0) if the walk
// ended before reaching the wanted frame.
struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
  explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        method_(nullptr),
        dex_pc_(0),
        current_frame_number_(0),
        wanted_frame_number_(frame) {}
  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m == nullptr || m->IsRuntimeMethod()) {
      // Runtime method, upcall, or resolution issue. Skip.
      return true;
    }

    // Is this the requested frame?
    if (current_frame_number_ == wanted_frame_number_) {
      method_ = m;
      dex_pc_ = GetDexPc(false /* abort_on_error*/);
      return false;  // Found it; stop the walk.
    }

    // Look for more.
    current_frame_number_++;
    return true;
  }

  // Walk results; valid only after WalkStack().
  ArtMethod* method_;
  uint32_t dex_pc_;

 private:
  size_t current_frame_number_;
  const size_t wanted_frame_number_;
};
    257 
    258 // This function is inlined and just helps to not have the VLOG and ATRACE check at all the
    259 // potential tracing points.
void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
  // Fast path: only pay for the stack walk in AtraceMonitorLockImpl when both verbose
  // systrace lock logging and atrace are actually enabled.
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) {
    AtraceMonitorLockImpl(self, obj, is_wait);
  }
}
    265 
// Slow path for AtraceMonitorLock: walks the stack to find the caller's source location and
// emits an ATRACE_BEGIN describing the lock/wait event. Must be paired with a later
// ATRACE_END via AtraceMonitorUnlock().
void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_wait) {
  // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
  // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
  // stack walk than if !is_wait.
  NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
  visitor.WalkStack(false);
  const char* prefix = is_wait ? "Waiting on " : "Locking ";

  const char* filename;
  int32_t line_number;
  TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);

  // It would be nice to have a stable "ID" for the object here. However, the only stable thing
  // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
  // times when it is unsafe to make that call (see stack dumping for an explanation). More
  // importantly, we would have to give up on thin-locking when adding systrace locks, as the
  // identity hashcode is stored in the lockword normally (so can't be used with thin-locks).
  //
  // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids
  // also do not have to be stable, as the monitor may be deflated.
  std::string tmp = StringPrintf("%s %d at %s:%d",
      prefix,
      (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj))),
      (filename != nullptr ? filename : "null"),
      line_number);
  ATRACE_BEGIN(tmp.c_str());
}
    293 
// Ends the trace section begun by AtraceMonitorLock().
void Monitor::AtraceMonitorUnlock() {
  // NOTE(review): only the VLOG flag is checked here, not ATRACE_ENABLED() — presumably so the
  // ATRACE_END still fires if atrace was toggled off after the matching ATRACE_BEGIN; confirm
  // this is intentional asymmetry with AtraceMonitorLock().
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
    ATRACE_END();
  }
}
    299 
    300 std::string Monitor::PrettyContentionInfo(const std::string& owner_name,
    301                                           pid_t owner_tid,
    302                                           ArtMethod* owners_method,
    303                                           uint32_t owners_dex_pc,
    304                                           size_t num_waiters) {
    305   Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
    306   const char* owners_filename;
    307   int32_t owners_line_number = 0;
    308   if (owners_method != nullptr) {
    309     TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
    310   }
    311   std::ostringstream oss;
    312   oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")";
    313   if (owners_method != nullptr) {
    314     oss << " at " << owners_method->PrettyMethod();
    315     oss << "(" << owners_filename << ":" << owners_line_number << ")";
    316   }
    317   oss << " waiters=" << num_waiters;
    318   return oss.str();
    319 }
    320 
    321 bool Monitor::TryLockLocked(Thread* self) {
    322   if (owner_ == nullptr) {  // Unowned.
    323     owner_ = self;
    324     CHECK_EQ(lock_count_, 0);
    325     // When debugging, save the current monitor holder for future
    326     // acquisition failures to use in sampled logging.
    327     if (lock_profiling_threshold_ != 0) {
    328       locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
    329     }
    330   } else if (owner_ == self) {  // Recursive.
    331     lock_count_++;
    332   } else {
    333     return false;
    334   }
    335   AtraceMonitorLock(self, GetObject(), false /* is_wait */);
    336   return true;
    337 }
    338 
// Non-blocking acquisition: briefly takes monitor_lock_ and delegates to TryLockLocked().
bool Monitor::TryLock(Thread* self) {
  MutexLock mu(self, monitor_lock_);
  return TryLockLocked(self);
}
    343 
// Acquires the monitor for |self|, blocking on monitor_contenders_ until it is available.
// Also produces optional diagnostics: a systrace section while blocked, and a sampled
// contention-log entry when lock profiling is enabled. The lock-ordering discipline is
// delicate: monitor_lock_ is released before blocking (and before re-taking mutator_lock_)
// and re-acquired afterwards — see the inline comments.
void Monitor::Lock(Thread* self) {
  MutexLock mu(self, monitor_lock_);
  while (true) {
    if (TryLockLocked(self)) {
      return;
    }
    // Contended.
    const bool log_contention = (lock_profiling_threshold_ != 0);
    uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
    // Snapshot the owner's location while we still hold monitor_lock_.
    ArtMethod* owners_method = locking_method_;
    uint32_t owners_dex_pc = locking_dex_pc_;
    // Do this before releasing the lock so that we don't get deflated.
    size_t num_waiters = num_waiters_;
    ++num_waiters_;

    // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
    // lock and then re-acquiring the mutator lock can deadlock.
    bool started_trace = false;
    if (ATRACE_ENABLED()) {
      if (owner_ != nullptr) {  // Did the owner_ give the lock up?
        std::ostringstream oss;
        std::string name;
        owner_->GetThreadName(name);
        oss << PrettyContentionInfo(name,
                                    owner_->GetTid(),
                                    owners_method,
                                    owners_dex_pc,
                                    num_waiters);
        // Add info for contending thread.
        uint32_t pc;
        ArtMethod* m = self->GetCurrentMethod(&pc);
        const char* filename;
        int32_t line_number;
        TranslateLocation(m, pc, &filename, &line_number);
        oss << " blocking from "
            << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
            << ":" << line_number << ")";
        ATRACE_BEGIN(oss.str().c_str());
        started_trace = true;
      }
    }

    monitor_lock_.Unlock(self);  // Let go of locks in order.
    self->SetMonitorEnterObject(GetObject());
    {
      ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
      uint32_t original_owner_thread_id = 0u;
      {
        // Reacquire monitor_lock_ without mutator_lock_ for Wait.
        MutexLock mu2(self, monitor_lock_);
        if (owner_ != nullptr) {  // Did the owner_ give the lock up?
          original_owner_thread_id = owner_->GetThreadId();
          monitor_contenders_.Wait(self);  // Still contended so wait.
        }
      }
      if (original_owner_thread_id != 0u) {
        // Woken from contention.
        if (log_contention) {
          uint32_t original_owner_tid = 0;
          std::string original_owner_name;
          {
            MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
            // Re-find the owner in case the thread got killed.
            Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
                original_owner_thread_id);
            // Do not do any work that requires the mutator lock.
            if (original_owner != nullptr) {
              original_owner_tid = original_owner->GetTid();
              original_owner->GetThreadName(original_owner_name);
            }
          }

          if (original_owner_tid != 0u) {
            uint64_t wait_ms = MilliTime() - wait_start_ms;
            // Sample with probability proportional to how close the wait came to the threshold;
            // waits at/over the threshold are always logged.
            uint32_t sample_percent;
            if (wait_ms >= lock_profiling_threshold_) {
              sample_percent = 100;
            } else {
              sample_percent = 100 * wait_ms / lock_profiling_threshold_;
            }
            if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
              // Reacquire mutator_lock_ for logging.
              ScopedObjectAccess soa(self);
              if (wait_ms > kLongWaitMs && owners_method != nullptr) {
                uint32_t pc;
                ArtMethod* m = self->GetCurrentMethod(&pc);
                // TODO: We should maybe check that original_owner is still a live thread.
                LOG(WARNING) << "Long "
                    << PrettyContentionInfo(original_owner_name,
                                            original_owner_tid,
                                            owners_method,
                                            owners_dex_pc,
                                            num_waiters)
                    << " in " << ArtMethod::PrettyMethod(m) << " for "
                    << PrettyDuration(MsToNs(wait_ms));
              }
              const char* owners_filename;
              int32_t owners_line_number;
              TranslateLocation(owners_method,
                                owners_dex_pc,
                                &owners_filename,
                                &owners_line_number);
              LogContentionEvent(self,
                                 wait_ms,
                                 sample_percent,
                                 owners_filename,
                                 owners_line_number);
            }
          }
        }
      }
    }
    if (started_trace) {
      ATRACE_END();
    }
    self->SetMonitorEnterObject(nullptr);
    monitor_lock_.Lock(self);  // Reacquire locks in order.
    --num_waiters_;
  }
}
    464 
// Throws java.lang.IllegalMonitorStateException on the current thread with a printf-style
// message. The format attribute lets the compiler check callers' format strings.
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
                                              __attribute__((format(printf, 1, 2)));

static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  va_list args;
  va_start(args, fmt);
  Thread* self = Thread::Current();
  self->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
  // Before the runtime is fully started (or with verbose monitor logging) also dump the
  // exception and the thread, since the pending exception may otherwise never be printed.
  if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
    std::ostringstream ss;
    self->Dump(ss);
    LOG(Runtime::Current()->IsStarted() ? ::android::base::INFO : ::android::base::ERROR)
        << self->GetException()->Dump() << "\n" << ss.str();
  }
  va_end(args);
}
    482 
    483 static std::string ThreadToString(Thread* thread) {
    484   if (thread == nullptr) {
    485     return "nullptr";
    486   }
    487   std::ostringstream oss;
    488   // TODO: alternatively, we could just return the thread's name.
    489   oss << *thread;
    490   return oss.str();
    491 }
    492 
    493 void Monitor::FailedUnlock(mirror::Object* o,
    494                            uint32_t expected_owner_thread_id,
    495                            uint32_t found_owner_thread_id,
    496                            Monitor* monitor) {
    497   // Acquire thread list lock so threads won't disappear from under us.
    498   std::string current_owner_string;
    499   std::string expected_owner_string;
    500   std::string found_owner_string;
    501   uint32_t current_owner_thread_id = 0u;
    502   {
    503     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    504     ThreadList* const thread_list = Runtime::Current()->GetThreadList();
    505     Thread* expected_owner = thread_list->FindThreadByThreadId(expected_owner_thread_id);
    506     Thread* found_owner = thread_list->FindThreadByThreadId(found_owner_thread_id);
    507 
    508     // Re-read owner now that we hold lock.
    509     Thread* current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
    510     if (current_owner != nullptr) {
    511       current_owner_thread_id = current_owner->GetThreadId();
    512     }
    513     // Get short descriptions of the threads involved.
    514     current_owner_string = ThreadToString(current_owner);
    515     expected_owner_string = expected_owner != nullptr ? ThreadToString(expected_owner) : "unnamed";
    516     found_owner_string = found_owner != nullptr ? ThreadToString(found_owner) : "unnamed";
    517   }
    518 
    519   if (current_owner_thread_id == 0u) {
    520     if (found_owner_thread_id == 0u) {
    521       ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
    522                                          " on thread '%s'",
    523                                          mirror::Object::PrettyTypeOf(o).c_str(),
    524                                          expected_owner_string.c_str());
    525     } else {
    526       // Race: the original read found an owner but now there is none
    527       ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
    528                                          " (where now the monitor appears unowned) on thread '%s'",
    529                                          found_owner_string.c_str(),
    530                                          mirror::Object::PrettyTypeOf(o).c_str(),
    531                                          expected_owner_string.c_str());
    532     }
    533   } else {
    534     if (found_owner_thread_id == 0u) {
    535       // Race: originally there was no owner, there is now
    536       ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
    537                                          " (originally believed to be unowned) on thread '%s'",
    538                                          current_owner_string.c_str(),
    539                                          mirror::Object::PrettyTypeOf(o).c_str(),
    540                                          expected_owner_string.c_str());
    541     } else {
    542       if (found_owner_thread_id != current_owner_thread_id) {
    543         // Race: originally found and current owner have changed
    544         ThrowIllegalMonitorStateExceptionF("unlock of monitor originally owned by '%s' (now"
    545                                            " owned by '%s') on object of type '%s' on thread '%s'",
    546                                            found_owner_string.c_str(),
    547                                            current_owner_string.c_str(),
    548                                            mirror::Object::PrettyTypeOf(o).c_str(),
    549                                            expected_owner_string.c_str());
    550       } else {
    551         ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
    552                                            " on thread '%s",
    553                                            current_owner_string.c_str(),
    554                                            mirror::Object::PrettyTypeOf(o).c_str(),
    555                                            expected_owner_string.c_str());
    556       }
    557     }
    558   }
    559 }
    560 
// Releases one level of the monitor held by |self|. Returns true on success. If |self| does
// not own the monitor, throws IllegalMonitorStateException (via FailedUnlock, outside
// monitor_lock_) and returns false.
bool Monitor::Unlock(Thread* self) {
  DCHECK(self != nullptr);
  uint32_t owner_thread_id = 0u;
  {
    MutexLock mu(self, monitor_lock_);
    Thread* owner = owner_;
    if (owner != nullptr) {
      owner_thread_id = owner->GetThreadId();
    }
    if (owner == self) {
      // We own the monitor, so nobody else can be in here.
      AtraceMonitorUnlock();
      if (lock_count_ == 0) {
        // Outermost unlock: clear ownership and profiling info, then wake one contender.
        owner_ = nullptr;
        locking_method_ = nullptr;
        locking_dex_pc_ = 0;
        // Wake a contender.
        monitor_contenders_.Signal(self);
      } else {
        // Recursive unlock: drop one recursion level; ownership is retained.
        --lock_count_;
      }
      return true;
    }
  }
  // We don't own this, so we're not allowed to unlock it.
  // The JNI spec says that we should throw IllegalMonitorStateException in this case.
  FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
  return false;
}
    590 
// Implements Object.wait()/wait(ms)/wait(ms, ns) and related blocking (|why| selects the
// reported thread state: kWaiting, kTimedWaiting, or kSleeping). The caller must own the
// monitor; the entire recursion count is released for the duration of the wait and restored
// before returning. |interruptShouldThrow| controls whether an interrupt results in
// InterruptedException being thrown (with the interrupt status cleared first).
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);

  monitor_lock_.Lock(self);

  // Make sure that we hold the lock.
  if (owner_ != self) {
    monitor_lock_.Unlock(self);
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
    return;
  }

  // We need to turn a zero-length timed wait into a regular wait because
  // Object.wait(0, 0) is defined as Object.wait(0), which is defined as Object.wait().
  if (why == kTimedWaiting && (ms == 0 && ns == 0)) {
    why = kWaiting;
  }

  // Enforce the timeout range.
  if (ms < 0 || ns < 0 || ns > 999999) {
    monitor_lock_.Unlock(self);
    self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
                             "timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
    return;
  }

  /*
   * Add ourselves to the set of threads waiting on this monitor, and
   * release our hold.  We need to let it go even if we're a few levels
   * deep in a recursive lock, and we need to restore that later.
   *
   * We append to the wait set ahead of clearing the count and owner
   * fields so the subroutine can check that the calling thread owns
   * the monitor.  Aside from that, the order of member updates is
   * not order sensitive as we hold the pthread mutex.
   */
  AppendToWaitSet(self);
  ++num_waiters_;
  // Save the recursion state so it can be restored after we re-acquire the monitor.
  int prev_lock_count = lock_count_;
  lock_count_ = 0;
  owner_ = nullptr;
  ArtMethod* saved_method = locking_method_;
  locking_method_ = nullptr;
  uintptr_t saved_dex_pc = locking_dex_pc_;
  locking_dex_pc_ = 0;

  AtraceMonitorUnlock();  // For the implict Unlock() just above. This will only end the deepest
                          // nesting, but that is enough for the visualization, and corresponds to
                          // the single Lock() we do afterwards.
  AtraceMonitorLock(self, GetObject(), true /* is_wait */);

  bool was_interrupted = false;
  {
    // Update thread state. If the GC wakes up, it'll ignore us, knowing
    // that we won't touch any references in this state, and we'll check
    // our suspend mode before we transition out.
    ScopedThreadSuspension sts(self, why);

    // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
    MutexLock mu(self, *self->GetWaitMutex());

    // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
    // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
    // up.
    DCHECK(self->GetWaitMonitor() == nullptr);
    self->SetWaitMonitor(this);

    // Release the monitor lock.
    monitor_contenders_.Signal(self);
    monitor_lock_.Unlock(self);

    // Handle the case where the thread was interrupted before we called wait().
    if (self->IsInterruptedLocked()) {
      was_interrupted = true;
    } else {
      // Wait for a notification or a timeout to occur.
      if (why == kWaiting) {
        self->GetWaitConditionVariable()->Wait(self);
      } else {
        DCHECK(why == kTimedWaiting || why == kSleeping) << why;
        self->GetWaitConditionVariable()->TimedWait(self, ms, ns);
      }
      was_interrupted = self->IsInterruptedLocked();
    }
  }

  {
    // We reset the thread's wait_monitor_ field after transitioning back to runnable so
    // that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
    // and diagnostic purposes. (If you reset this earlier, stack dumps will claim that threads
    // are waiting on "null".)
    MutexLock mu(self, *self->GetWaitMutex());
    DCHECK(self->GetWaitMonitor() != nullptr);
    self->SetWaitMonitor(nullptr);
  }

  // Allocate the interrupted exception not holding the monitor lock since it may cause a GC.
  // If the GC requires acquiring the monitor for enqueuing cleared references, this would
  // cause a deadlock if the monitor is held.
  if (was_interrupted && interruptShouldThrow) {
    /*
     * We were interrupted while waiting, or somebody interrupted an
     * un-interruptible thread earlier and we're bailing out immediately.
     *
     * The doc sayeth: "The interrupted status of the current thread is
     * cleared when this exception is thrown."
     */
    {
      MutexLock mu(self, *self->GetWaitMutex());
      self->SetInterruptedLocked(false);
    }
    self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
  }

  AtraceMonitorUnlock();  // End Wait().

  // Re-acquire the monitor and lock.
  Lock(self);
  monitor_lock_.Lock(self);
  self->GetWaitMutex()->AssertNotHeld(self);

  /*
   * We remove our thread from wait set after restoring the count
   * and owner fields so the subroutine can check that the calling
   * thread owns the monitor. Aside from that, the order of member
   * updates is not order sensitive as we hold the pthread mutex.
   */
  owner_ = self;
  lock_count_ = prev_lock_count;
  locking_method_ = saved_method;
  locking_dex_pc_ = saved_dex_pc;
  --num_waiters_;
  RemoveFromWaitSet(self);

  monitor_lock_.Unlock(self);
}
    729 
// Wake a single thread waiting on this monitor, per Object.notify(). The caller (self) must
// own the monitor; otherwise IllegalMonitorStateException is thrown.
void Monitor::Notify(Thread* self) {
  DCHECK(self != nullptr);
  MutexLock mu(self, monitor_lock_);
  // Make sure that we hold the lock.
  if (owner_ != self) {
    ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
    return;
  }
  // Signal the first waiting thread in the wait set. Entries may be stale: a thread that was
  // already interrupted or timed out clears its wait_monitor_ on wakeup, so keep walking until
  // we find a thread that is genuinely still waiting (or the set is exhausted).
  while (wait_set_ != nullptr) {
    Thread* thread = wait_set_;
    wait_set_ = thread->GetWaitNext();
    thread->SetWaitNext(nullptr);

    // Check to see if the thread is still waiting. wait_monitor_ is read/written under the
    // thread's wait mutex, so holding it here makes the check and the Signal atomic with
    // respect to the waiter's own wakeup path.
    MutexLock wait_mu(self, *thread->GetWaitMutex());
    if (thread->GetWaitMonitor() != nullptr) {
      thread->GetWaitConditionVariable()->Signal(self);
      return;
    }
  }
}
    752 
    753 void Monitor::NotifyAll(Thread* self) {
    754   DCHECK(self != nullptr);
    755   MutexLock mu(self, monitor_lock_);
    756   // Make sure that we hold the lock.
    757   if (owner_ != self) {
    758     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
    759     return;
    760   }
    761   // Signal all threads in the wait set.
    762   while (wait_set_ != nullptr) {
    763     Thread* thread = wait_set_;
    764     wait_set_ = thread->GetWaitNext();
    765     thread->SetWaitNext(nullptr);
    766     thread->Notify();
    767   }
    768 }
    769 
    770 bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
    771   DCHECK(obj != nullptr);
    772   // Don't need volatile since we only deflate with mutators suspended.
    773   LockWord lw(obj->GetLockWord(false));
    774   // If the lock isn't an inflated monitor, then we don't need to deflate anything.
    775   if (lw.GetState() == LockWord::kFatLocked) {
    776     Monitor* monitor = lw.FatLockMonitor();
    777     DCHECK(monitor != nullptr);
    778     MutexLock mu(self, monitor->monitor_lock_);
    779     // Can't deflate if we have anybody waiting on the CV.
    780     if (monitor->num_waiters_ > 0) {
    781       return false;
    782     }
    783     Thread* owner = monitor->owner_;
    784     if (owner != nullptr) {
    785       // Can't deflate if we are locked and have a hash code.
    786       if (monitor->HasHashCode()) {
    787         return false;
    788       }
    789       // Can't deflate if our lock count is too high.
    790       if (static_cast<uint32_t>(monitor->lock_count_) > LockWord::kThinLockMaxCount) {
    791         return false;
    792       }
    793       // Deflate to a thin lock.
    794       LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(),
    795                                                  monitor->lock_count_,
    796                                                  lw.GCState());
    797       // Assume no concurrent read barrier state changes as mutators are suspended.
    798       obj->SetLockWord(new_lw, false);
    799       VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
    800           << monitor->lock_count_;
    801     } else if (monitor->HasHashCode()) {
    802       LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
    803       // Assume no concurrent read barrier state changes as mutators are suspended.
    804       obj->SetLockWord(new_lw, false);
    805       VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
    806     } else {
    807       // No lock and no hash, just put an empty lock word inside the object.
    808       LockWord new_lw = LockWord::FromDefault(lw.GCState());
    809       // Assume no concurrent read barrier state changes as mutators are suspended.
    810       obj->SetLockWord(new_lw, false);
    811       VLOG(monitor) << "Deflated" << obj << " to empty lock word";
    812     }
    813     // The monitor is deflated, mark the object as null so that we know to delete it during the
    814     // next GC.
    815     monitor->obj_ = GcRoot<mirror::Object>(nullptr);
    816   }
    817   return true;
    818 }
    819 
    820 void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
    821   DCHECK(self != nullptr);
    822   DCHECK(obj != nullptr);
    823   // Allocate and acquire a new monitor.
    824   Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
    825   DCHECK(m != nullptr);
    826   if (m->Install(self)) {
    827     if (owner != nullptr) {
    828       VLOG(monitor) << "monitor: thread" << owner->GetThreadId()
    829           << " created monitor " << m << " for object " << obj;
    830     } else {
    831       VLOG(monitor) << "monitor: Inflate with hashcode " << hash_code
    832           << " created monitor " << m << " for object " << obj;
    833     }
    834     Runtime::Current()->GetMonitorList()->Add(m);
    835     CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
    836   } else {
    837     MonitorPool::ReleaseMonitor(self, m);
    838   }
    839 }
    840 
// Inflate a thin lock on obj into a fat monitor. If self owns the thin lock this is
// straightforward; otherwise the current owner must be suspended so the lock word can be
// swapped out from under it safely.
void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
                                uint32_t hash_code) {
  DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
  uint32_t owner_thread_id = lock_word.ThinLockOwner();
  if (owner_thread_id == self->GetThreadId()) {
    // We own the monitor, we can easily inflate it.
    Inflate(self, self, obj.Get(), hash_code);
  } else {
    ThreadList* thread_list = Runtime::Current()->GetThreadList();
    // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
    // Publishing the contended object lets thread dumps report what we are blocked on.
    self->SetMonitorEnterObject(obj.Get());
    bool timed_out;
    Thread* owner;
    {
      ScopedThreadSuspension sts(self, kBlocked);
      // May return null if the target could not be suspended (e.g. it exited or the
      // suspension timed out).
      owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
    }
    if (owner != nullptr) {
      // We succeeded in suspending the thread, check the lock's status didn't change.
      // The owner may have released (or further recursed) the lock before it was suspended,
      // in which case we simply give up; the caller retries from the top.
      lock_word = obj->GetLockWord(true);
      if (lock_word.GetState() == LockWord::kThinLocked &&
          lock_word.ThinLockOwner() == owner_thread_id) {
        // Go ahead and inflate the lock.
        Inflate(self, owner, obj.Get(), hash_code);
      }
      thread_list->Resume(owner, false);
    }
    self->SetMonitorEnterObject(nullptr);
  }
}
    871 
// Fool annotalysis (Clang thread-safety analysis) into thinking that the lock on obj is
// acquired. The function is a behavioral no-op: only the annotations matter.
static mirror::Object* FakeLock(mirror::Object* obj)
    EXCLUSIVE_LOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}
    877 
// Fool annotalysis into thinking that the lock on obj is released. Behavioral no-op,
// mirroring FakeLock above.
static mirror::Object* FakeUnlock(mirror::Object* obj)
    UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS {
  return obj;
}
    883 
// Acquire the lock on obj on behalf of self, looping over the lock word state machine:
//   - kUnlocked:   CAS in a thin lock owned by self.
//   - kThinLocked: if self owns it, bump the recursion count (inflating on overflow);
//                  otherwise spin/yield up to a threshold, then inflate under contention.
//   - kHashCode:   inflate, preserving the hash code.
//   - kFatLocked:  acquire the fat Monitor's lock (blocking).
// When trylock is true, returns nullptr instead of blocking if another thread holds the lock.
// Returns the (possibly moved) locked object on success.
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool trylock) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  self->AssertThreadSuspensionIsAllowable();
  obj = FakeLock(obj);
  uint32_t thread_id = self->GetThreadId();
  size_t contention_count = 0;
  // Hold obj in a handle: inflation below can suspend, and the object may move.
  StackHandleScope<1> hs(self);
  Handle<mirror::Object> h_obj(hs.NewHandle(obj));
  while (true) {
    // We initially read the lockword with ordinary Java/relaxed semantics. When stronger
    // semantics are needed, we address it below. Since GetLockWord bottoms out to a relaxed load,
    // we can fix it later, in an infrequently executed case, with a fence.
    LockWord lock_word = h_obj->GetLockWord(false);
    switch (lock_word.GetState()) {
      case LockWord::kUnlocked: {
        // No ordering required for preceding lockword read, since we retest.
        LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
        if (h_obj->CasLockWordWeakAcquire(lock_word, thin_locked)) {
          AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
          return h_obj.Get();  // Success!
        }
        continue;  // Go again.
      }
      case LockWord::kThinLocked: {
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id == thread_id) {
          // No ordering required for initial lockword read.
          // We own the lock, increase the recursion count.
          uint32_t new_count = lock_word.ThinLockCount() + 1;
          if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
            LockWord thin_locked(LockWord::FromThinLockId(thread_id,
                                                          new_count,
                                                          lock_word.GCState()));
            // Only this thread pays attention to the count. Thus there is no need for stronger
            // than relaxed memory ordering.
            if (!kUseReadBarrier) {
              h_obj->SetLockWord(thin_locked, false /* volatile */);
              AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
              return h_obj.Get();  // Success!
            } else {
              // Use CAS to preserve the read barrier state.
              if (h_obj->CasLockWordWeakRelaxed(lock_word, thin_locked)) {
                AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
                return h_obj.Get();  // Success!
              }
            }
            continue;  // Go again.
          } else {
            // We'd overflow the recursion count, so inflate the monitor.
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
        } else {
          if (trylock) {
            return nullptr;
          }
          // Contention.
          contention_count++;
          Runtime* runtime = Runtime::Current();
          if (contention_count <= runtime->GetMaxSpinsBeforeThinLockInflation()) {
            // TODO: Consider switching the thread state to kBlocked when we are yielding.
            // Use sched_yield instead of NanoSleep since NanoSleep can wait much longer than the
            // parameter you pass in. This can cause thread suspension to take excessively long
            // and make long pauses. See b/16307460.
            // TODO: We should literally spin first, without sched_yield. Sched_yield either does
            // nothing (at significant expense), or guarantees that we wait at least microseconds.
            // If the owner is running, I would expect the median lock hold time to be hundreds
            // of nanoseconds or less.
            sched_yield();
          } else {
            contention_count = 0;
            // No ordering required for initial lockword read. Install rereads it anyway.
            InflateThinLocked(self, h_obj, lock_word, 0);
          }
        }
        continue;  // Start from the beginning.
      }
      case LockWord::kFatLocked: {
        // We should have done an acquire read of the lockword initially, to ensure
        // visibility of the monitor data structure. Use an explicit fence instead.
        QuasiAtomic::ThreadFenceAcquire();
        Monitor* mon = lock_word.FatLockMonitor();
        if (trylock) {
          return mon->TryLock(self) ? h_obj.Get() : nullptr;
        } else {
          mon->Lock(self);
          return h_obj.Get();  // Success!
        }
      }
      case LockWord::kHashCode:
        // Inflate with the existing hashcode.
        // Again no ordering required for initial lockword read, since we don't rely
        // on the visibility of any prior computation.
        Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
        continue;  // Start from the beginning.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        UNREACHABLE();
      }
    }
  }
}
    986 
    987 bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
    988   DCHECK(self != nullptr);
    989   DCHECK(obj != nullptr);
    990   self->AssertThreadSuspensionIsAllowable();
    991   obj = FakeUnlock(obj);
    992   StackHandleScope<1> hs(self);
    993   Handle<mirror::Object> h_obj(hs.NewHandle(obj));
    994   while (true) {
    995     LockWord lock_word = obj->GetLockWord(true);
    996     switch (lock_word.GetState()) {
    997       case LockWord::kHashCode:
    998         // Fall-through.
    999       case LockWord::kUnlocked:
   1000         FailedUnlock(h_obj.Get(), self->GetThreadId(), 0u, nullptr);
   1001         return false;  // Failure.
   1002       case LockWord::kThinLocked: {
   1003         uint32_t thread_id = self->GetThreadId();
   1004         uint32_t owner_thread_id = lock_word.ThinLockOwner();
   1005         if (owner_thread_id != thread_id) {
   1006           FailedUnlock(h_obj.Get(), thread_id, owner_thread_id, nullptr);
   1007           return false;  // Failure.
   1008         } else {
   1009           // We own the lock, decrease the recursion count.
   1010           LockWord new_lw = LockWord::Default();
   1011           if (lock_word.ThinLockCount() != 0) {
   1012             uint32_t new_count = lock_word.ThinLockCount() - 1;
   1013             new_lw = LockWord::FromThinLockId(thread_id, new_count, lock_word.GCState());
   1014           } else {
   1015             new_lw = LockWord::FromDefault(lock_word.GCState());
   1016           }
   1017           if (!kUseReadBarrier) {
   1018             DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
   1019             // TODO: This really only needs memory_order_release, but we currently have
   1020             // no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
   1021             // with a final argument of true. This slows down x86 and ARMv7, but probably not v8.
   1022             h_obj->SetLockWord(new_lw, true);
   1023             AtraceMonitorUnlock();
   1024             // Success!
   1025             return true;
   1026           } else {
   1027             // Use CAS to preserve the read barrier state.
   1028             if (h_obj->CasLockWordWeakRelease(lock_word, new_lw)) {
   1029               AtraceMonitorUnlock();
   1030               // Success!
   1031               return true;
   1032             }
   1033           }
   1034           continue;  // Go again.
   1035         }
   1036       }
   1037       case LockWord::kFatLocked: {
   1038         Monitor* mon = lock_word.FatLockMonitor();
   1039         return mon->Unlock(self);
   1040       }
   1041       default: {
   1042         LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
   1043         return false;
   1044       }
   1045     }
   1046   }
   1047 }
   1048 
// Implements Object.wait()/Thread.sleep() entry: obj must be locked by self. A thin lock held
// by self is first inflated (a fat Monitor is needed to enqueue the waiter), then the wait is
// delegated to the Monitor. Throws IllegalMonitorStateException if self does not hold the lock.
void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
                   bool interruptShouldThrow, ThreadState why) {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  while (lock_word.GetState() != LockWord::kFatLocked) {
    switch (lock_word.GetState()) {
      case LockWord::kHashCode:
        // Fall-through.
      case LockWord::kUnlocked:
        ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
        return;  // Failure.
      case LockWord::kThinLocked: {
        uint32_t thread_id = self->GetThreadId();
        uint32_t owner_thread_id = lock_word.ThinLockOwner();
        if (owner_thread_id != thread_id) {
          ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
          return;  // Failure.
        } else {
          // We own the lock, inflate to enqueue ourself on the Monitor. May fail spuriously so
          // re-load.
          Inflate(self, self, obj, 0);
          lock_word = obj->GetLockWord(true);
        }
        break;
      }
      case LockWord::kFatLocked:  // Unreachable given the loop condition above. Fall-through.
      default: {
        LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
        return;
      }
    }
  }
  // The lock is now fat; perform the actual wait on the Monitor.
  Monitor* mon = lock_word.FatLockMonitor();
  mon->Wait(self, ms, ns, interruptShouldThrow, why);
}
   1085 
   1086 void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
   1087   DCHECK(self != nullptr);
   1088   DCHECK(obj != nullptr);
   1089   LockWord lock_word = obj->GetLockWord(true);
   1090   switch (lock_word.GetState()) {
   1091     case LockWord::kHashCode:
   1092       // Fall-through.
   1093     case LockWord::kUnlocked:
   1094       ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
   1095       return;  // Failure.
   1096     case LockWord::kThinLocked: {
   1097       uint32_t thread_id = self->GetThreadId();
   1098       uint32_t owner_thread_id = lock_word.ThinLockOwner();
   1099       if (owner_thread_id != thread_id) {
   1100         ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
   1101         return;  // Failure.
   1102       } else {
   1103         // We own the lock but there's no Monitor and therefore no waiters.
   1104         return;  // Success.
   1105       }
   1106     }
   1107     case LockWord::kFatLocked: {
   1108       Monitor* mon = lock_word.FatLockMonitor();
   1109       if (notify_all) {
   1110         mon->NotifyAll(self);
   1111       } else {
   1112         mon->Notify(self);
   1113       }
   1114       return;  // Success.
   1115     }
   1116     default: {
   1117       LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
   1118       return;
   1119     }
   1120   }
   1121 }
   1122 
   1123 uint32_t Monitor::GetLockOwnerThreadId(mirror::Object* obj) {
   1124   DCHECK(obj != nullptr);
   1125   LockWord lock_word = obj->GetLockWord(true);
   1126   switch (lock_word.GetState()) {
   1127     case LockWord::kHashCode:
   1128       // Fall-through.
   1129     case LockWord::kUnlocked:
   1130       return ThreadList::kInvalidThreadId;
   1131     case LockWord::kThinLocked:
   1132       return lock_word.ThinLockOwner();
   1133     case LockWord::kFatLocked: {
   1134       Monitor* mon = lock_word.FatLockMonitor();
   1135       return mon->GetOwnerThreadId();
   1136     }
   1137     default: {
   1138       LOG(FATAL) << "Unreachable";
   1139       UNREACHABLE();
   1140     }
   1141   }
   1142 }
   1143 
// Append a human-readable "waiting on"/"sleeping on"/"waiting to lock" line for `thread` to
// os, as used in thread dumps, including the blocked-on object and (if known) the owner's id.
void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
  // Determine the wait message and object we're waiting or blocked upon.
  mirror::Object* pretty_object = nullptr;
  const char* wait_message = nullptr;
  uint32_t lock_owner = ThreadList::kInvalidThreadId;
  ThreadState state = thread->GetState();
  if (state == kWaiting || state == kTimedWaiting || state == kSleeping) {
    wait_message = (state == kSleeping) ? "  - sleeping on " : "  - waiting on ";
    Thread* self = Thread::Current();
    // wait_monitor_ is protected by the thread's wait mutex.
    MutexLock mu(self, *thread->GetWaitMutex());
    Monitor* monitor = thread->GetWaitMonitor();
    if (monitor != nullptr) {
      pretty_object = monitor->GetObject();
    }
  } else if (state == kBlocked) {
    wait_message = "  - waiting to lock ";
    pretty_object = thread->GetMonitorEnterObject();
    if (pretty_object != nullptr) {
      if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
        // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
        // may have not been flipped yet and "pretty_object" may be a from-space (stale) ref, in
        // which case the GetLockOwnerThreadId() call below will crash. So explicitly mark/forward
        // it here.
        pretty_object = ReadBarrier::Mark(pretty_object);
      }
      lock_owner = pretty_object->GetLockOwnerThreadId();
    }
  }

  if (wait_message != nullptr) {
    if (pretty_object == nullptr) {
      os << wait_message << "an unknown object";
    } else {
      if ((pretty_object->GetLockWord(true).GetState() == LockWord::kThinLocked) &&
          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
        // Getting the identity hashcode here would result in lock inflation and suspension of the
        // current thread, which isn't safe if this is the only runnable thread.
        os << wait_message << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)",
                                           reinterpret_cast<intptr_t>(pretty_object),
                                           pretty_object->PrettyTypeOf().c_str());
      } else {
        // - waiting on <0x6008c468> (a java.lang.Class<java.lang.ref.ReferenceQueue>)
        // Call PrettyTypeOf before IdentityHashCode since IdentityHashCode can cause thread
        // suspension and move pretty_object.
        const std::string pretty_type(pretty_object->PrettyTypeOf());
        os << wait_message << StringPrintf("<0x%08x> (a %s)", pretty_object->IdentityHashCode(),
                                           pretty_type.c_str());
      }
    }
    // - waiting to lock <0x613f83d8> (a java.lang.Object) held by thread 5
    if (lock_owner != ThreadList::kInvalidThreadId) {
      os << " held by thread " << lock_owner;
    }
    os << "\n";
  }
}
   1200 
   1201 mirror::Object* Monitor::GetContendedMonitor(Thread* thread) {
   1202   // This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
   1203   // definition of contended that includes a monitor a thread is trying to enter...
   1204   mirror::Object* result = thread->GetMonitorEnterObject();
   1205   if (result == nullptr) {
   1206     // ...but also a monitor that the thread is waiting on.
   1207     MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
   1208     Monitor* monitor = thread->GetWaitMonitor();
   1209     if (monitor != nullptr) {
   1210       result = monitor->GetObject();
   1211     }
   1212   }
   1213   return result;
   1214 }
   1215 
// Invoke `callback` once per object locked in the stack frame described by stack_visitor.
// Native synchronized methods report the JNI "this" reference; for dex methods the verifier
// is asked for the monitor-enter dex pcs still held at the current pc, and the locked objects
// are read out of the frame's registers.
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
                         void* callback_context, bool abort_on_failure) {
  ArtMethod* m = stack_visitor->GetMethod();
  CHECK(m != nullptr);

  // Native methods are an easy special case.
  // TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
  if (m->IsNative()) {
    if (m->IsSynchronized()) {
      // The first handle-scope entry of a synchronized native method is the locked "this"
      // (or class) reference.
      mirror::Object* jni_this =
          stack_visitor->GetCurrentHandleScope(sizeof(void*))->GetReference(0);
      callback(jni_this, callback_context);
    }
    return;
  }

  // Proxy methods should not be synchronized.
  if (m->IsProxyMethod()) {
    CHECK(!m->IsSynchronized());
    return;
  }

  // Is there any reason to believe there's any synchronization in this method?
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  CHECK(code_item != nullptr) << m->PrettyMethod();
  if (code_item->tries_size_ == 0) {
    return;  // No "tries" implies no synchronization, so no held locks to report.
  }

  // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort in the case it cannot
  // find the dex pc, and instead return kDexNoIndex. Then bail out, as it indicates we have an
  // inconsistent stack anyways.
  uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
  if (!abort_on_failure && dex_pc == DexFile::kDexNoIndex) {
    LOG(ERROR) << "Could not find dex_pc for " << m->PrettyMethod();
    return;
  }

  // Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
  // the locks held in this stack frame.
  std::vector<uint32_t> monitor_enter_dex_pcs;
  verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
  for (uint32_t monitor_dex_pc : monitor_enter_dex_pcs) {
    // The verifier works in terms of the dex pcs of the monitor-enter instructions.
    // We want the registers used by those instructions (so we can read the values out of them).
    const Instruction* monitor_enter_instruction =
        Instruction::At(&code_item->insns_[monitor_dex_pc]);

    // Quick sanity check.
    CHECK_EQ(monitor_enter_instruction->Opcode(), Instruction::MONITOR_ENTER)
      << "expected monitor-enter @" << monitor_dex_pc << "; was "
      << reinterpret_cast<const void*>(monitor_enter_instruction);

    // monitor-enter's vA holds the reference that was locked; read it from the frame.
    uint16_t monitor_register = monitor_enter_instruction->VRegA();
    uint32_t value;
    bool success = stack_visitor->GetVReg(m, monitor_register, kReferenceVReg, &value);
    CHECK(success) << "Failed to read v" << monitor_register << " of kind "
                   << kReferenceVReg << " in method " << m->PrettyMethod();
    mirror::Object* o = reinterpret_cast<mirror::Object*>(value);
    callback(o, callback_context);
  }
}
   1278 
   1279 bool Monitor::IsValidLockWord(LockWord lock_word) {
   1280   switch (lock_word.GetState()) {
   1281     case LockWord::kUnlocked:
   1282       // Nothing to check.
   1283       return true;
   1284     case LockWord::kThinLocked:
   1285       // Basic sanity check of owner.
   1286       return lock_word.ThinLockOwner() != ThreadList::kInvalidThreadId;
   1287     case LockWord::kFatLocked: {
   1288       // Check the  monitor appears in the monitor list.
   1289       Monitor* mon = lock_word.FatLockMonitor();
   1290       MonitorList* list = Runtime::Current()->GetMonitorList();
   1291       MutexLock mu(Thread::Current(), list->monitor_list_lock_);
   1292       for (Monitor* list_mon : list->list_) {
   1293         if (mon == list_mon) {
   1294           return true;  // Found our monitor.
   1295         }
   1296       }
   1297       return false;  // Fail - unowned monitor in an object.
   1298     }
   1299     case LockWord::kHashCode:
   1300       return true;
   1301     default:
   1302       LOG(FATAL) << "Unreachable";
   1303       UNREACHABLE();
   1304   }
   1305 }
   1306 
   1307 bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
   1308   MutexLock mu(Thread::Current(), monitor_lock_);
   1309   return owner_ != nullptr;
   1310 }
   1311 
   1312 void Monitor::TranslateLocation(ArtMethod* method,
   1313                                 uint32_t dex_pc,
   1314                                 const char** source_file,
   1315                                 int32_t* line_number) {
   1316   // If method is null, location is unknown
   1317   if (method == nullptr) {
   1318     *source_file = "";
   1319     *line_number = 0;
   1320     return;
   1321   }
   1322   *source_file = method->GetDeclaringClassSourceFile();
   1323   if (*source_file == nullptr) {
   1324     *source_file = "";
   1325   }
   1326   *line_number = method->GetLineNumFromDexPC(dex_pc);
   1327 }
   1328 
   1329 uint32_t Monitor::GetOwnerThreadId() {
   1330   MutexLock mu(Thread::Current(), monitor_lock_);
   1331   Thread* owner = owner_;
   1332   if (owner != nullptr) {
   1333     return owner->GetThreadId();
   1334   } else {
   1335     return ThreadList::kInvalidThreadId;
   1336   }
   1337 }
   1338 
   1339 MonitorList::MonitorList()
   1340     : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock", kMonitorListLock),
   1341       monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
   1342 }
   1343 
// Return every remaining monitor to the MonitorPool on shutdown.
MonitorList::~MonitorList() {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // Release all monitors to the pool.
  // TODO: Is it an invariant that *all* open monitors are in the list? Then we could
  // clear faster in the pool.
  MonitorPool::ReleaseMonitors(self, &list_);
}
   1352 
   1353 void MonitorList::DisallowNewMonitors() {
   1354   CHECK(!kUseReadBarrier);
   1355   MutexLock mu(Thread::Current(), monitor_list_lock_);
   1356   allow_new_monitors_ = false;
   1357 }
   1358 
   1359 void MonitorList::AllowNewMonitors() {
   1360   CHECK(!kUseReadBarrier);
   1361   Thread* self = Thread::Current();
   1362   MutexLock mu(self, monitor_list_lock_);
   1363   allow_new_monitors_ = true;
   1364   monitor_add_condition_.Broadcast(self);
   1365 }
   1366 
   1367 void MonitorList::BroadcastForNewMonitors() {
   1368   Thread* self = Thread::Current();
   1369   MutexLock mu(self, monitor_list_lock_);
   1370   monitor_add_condition_.Broadcast(self);
   1371 }
   1372 
// Register a newly inflated monitor with the global list (so GC can sweep it). May block
// while new monitors are disallowed (CMS reference processing).
void MonitorList::Add(Monitor* m) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  // CMS needs this to block for concurrent reference processing because an object allocated during
  // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
  // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
  while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
    monitor_add_condition_.WaitHoldingLocks(self);
  }
  list_.push_front(m);
}
   1387 
// Walks the monitor list, asking |visitor| whether each monitor's object is
// still live. Monitors whose objects are unmarked (or already deflated, i.e.
// null) are released back to the pool; live ones have their object pointer
// updated (the visitor may return a moved/forwarded address).
void MonitorList::SweepMonitorList(IsMarkedVisitor* visitor) {
  Thread* self = Thread::Current();
  MutexLock mu(self, monitor_list_lock_);
  for (auto it = list_.begin(); it != list_.end(); ) {
    Monitor* m = *it;
    // Disable the read barrier in GetObject() as this is called by GC.
    mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
    // The object of a monitor can be null if we have deflated it.
    mirror::Object* new_obj = obj != nullptr ? visitor->IsMarked(obj) : nullptr;
    if (new_obj == nullptr) {
      VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
                    << obj;
      MonitorPool::ReleaseMonitor(self, m);
      it = list_.erase(it);
    } else {
      // Object survived (possibly at a new address); keep the monitor.
      m->SetObject(new_obj);
      ++it;
    }
  }
}
   1408 
   1409 size_t MonitorList::Size() {
   1410   Thread* self = Thread::Current();
   1411   MutexLock mu(self, monitor_list_lock_);
   1412   return list_.size();
   1413 }
   1414 
   1415 class MonitorDeflateVisitor : public IsMarkedVisitor {
   1416  public:
   1417   MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
   1418 
   1419   virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
   1420       REQUIRES_SHARED(Locks::mutator_lock_) {
   1421     if (Monitor::Deflate(self_, object)) {
   1422       DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
   1423       ++deflate_count_;
   1424       // If we deflated, return null so that the monitor gets removed from the array.
   1425       return nullptr;
   1426     }
   1427     return object;  // Monitor was not deflated.
   1428   }
   1429 
   1430   Thread* const self_;
   1431   size_t deflate_count_;
   1432 };
   1433 
   1434 size_t MonitorList::DeflateMonitors() {
   1435   MonitorDeflateVisitor visitor;
   1436   Locks::mutator_lock_->AssertExclusiveHeld(visitor.self_);
   1437   SweepMonitorList(&visitor);
   1438   return visitor.deflate_count_;
   1439 }
   1440 
// Captures a snapshot of |obj|'s lock state: owning thread, recursive entry
// count, and (for fat locks) the set of waiting threads. For unlocked,
// hash-code, or forwarding-address lock words the defaults (no owner,
// entry_count_ == 0) stand.
// NOTE(review): for the fat-locked case this reads mon->owner_/lock_count_/
// wait_set_ without holding the monitor's lock — presumably callers ensure
// the world is stopped or tolerate a racy snapshot; confirm against callers.
MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
  DCHECK(obj != nullptr);
  LockWord lock_word = obj->GetLockWord(true);
  switch (lock_word.GetState()) {
    case LockWord::kUnlocked:
      // Fall-through.
    case LockWord::kForwardingAddress:
      // Fall-through.
    case LockWord::kHashCode:
      break;
    case LockWord::kThinLocked:
      // The owner's id is embedded in the lock word itself.
      owner_ = Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
      // Lock word stores (recursion count - 1), hence the +1.
      entry_count_ = 1 + lock_word.ThinLockCount();
      // Thin locks have no waiters.
      break;
    case LockWord::kFatLocked: {
      Monitor* mon = lock_word.FatLockMonitor();
      owner_ = mon->owner_;
      // lock_count_ also stores (recursion count - 1), hence the +1.
      entry_count_ = 1 + mon->lock_count_;
      // Collect the monitor's wait set (threads blocked in Object.wait()).
      for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
        waiters_.push_back(waiter);
      }
      break;
    }
  }
}
   1467 
   1468 }  // namespace art
   1469