/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

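// Walks a thread's stack and reports Java frames to a callback. Runtime methods are skipped;
// of the remaining frames, the first 'start' are dropped and then up to 'stop' frames are
// reported as jvmtiFrameInfo (a stop of 0 means no limit).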
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

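// Helper so the visitor's FrameFn template argument can be deduced from the callback.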
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

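// Checkpoint closure that collects frames into a vector. The leftover start/stop counts are
// published in start_result/stop_result so the caller can tell how much of the requested
// window was actually available.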
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

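// Copies collected frames into the caller's buffer, implementing the start_depth semantics of
// the JVMTI GetStackTrace spec: a non-negative start_depth counts from the top of the stack,
// while a negative start_depth selects the |start_depth| bottom-most frames.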
static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

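// Checkpoint closure that writes frames directly into the caller-provided buffer, avoiding the
// intermediate vector. Only used when the window is known up front (start_depth >= 0).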
struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

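// For reference, an agent reaches this implementation through the standard JVMTI interface.
// A rough, illustrative sketch of the agent-side call (not part of this file):
//
//   jvmtiFrameInfo frames[10];
//   jint count;
//   jvmtiError err = jvmti->GetStackTrace(thread, /* start_depth */ 0,
//                                         /* max_frame_count */ 10, frames, &count);
//   if (err == JVMTI_ERROR_NONE && count >= 1) {
//     // frames[0].method is the jmethodID of the top frame.
//   }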
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  } else {
    GetStackTraceVectorClosure closure(0, 0);
    // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
    if (!thread->RequestSynchronousCheckpoint(&closure)) {
      return ERR(THREAD_NOT_ALIVE);
    }

    return TranslateFrameVector(closure.frames,
                                start_depth,
                                closure.start_result,
                                max_frame_count,
                                frame_buffer,
                                count_ptr);
  }
}

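// Checkpoint closure run on every thread: each thread walks its own stack into storage handed
// out by the Data policy object, then passes the barrier so the requesting thread can wait for
// all threads to finish.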
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

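// Runs the closure on all threads via a checkpoint and blocks until every thread has passed
// the barrier.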
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

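// For reference, the corresponding agent-side call looks roughly like this (illustrative
// sketch only). Per the JVMTI spec, the returned chunk is a single allocation that must be
// freed with one Deallocate call:
//
//   jvmtiStackInfo* stack_info;
//   jint thread_count;
//   jvmtiError err = jvmti->GetAllStackTraces(10, &stack_info, &thread_count);
//   // ... use stack_info[0 .. thread_count - 1] ...
//   jvmti->Deallocate(reinterpret_cast<unsigned char*>(stack_info));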
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

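// Like GetAllStackTraces, but restricted to the threads in thread_list. Entries whose thread
// has no matching native thread (not yet started, or already terminated) still get a
// jvmtiStackInfo with an empty frame buffer and an appropriate thread state.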
jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
              REQUIRES_SHARED(art::Locks::mutator_lock_)
              REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  //       allocate one big chunk for this and the actual frames, which means we need
  //       to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors yet. Now put it all into an output buffer. Note that the chunk is sized for
  // thread_count entries, which may be more than frames.size() if some threads had no matching
  // native thread.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kNotStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
          JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kNotStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

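// Checkpoint closure wrapping GetFrameCountVisitor so the walk happens on the target thread.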
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }

  DCHECK(thread != nullptr);
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (count_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

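// Checkpoint closure wrapping GetLocationVisitor. Publishes the method and dex pc found at
// depth n, or a null method if the stack has fewer than n + 1 Java frames.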
struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());

  art::Thread* thread;
  jvmtiError thread_error = ERR(INTERNAL);
  if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(THREAD_NOT_ALIVE);
  }

  if (depth < 0) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
  if (!thread->RequestSynchronousCheckpoint(&closure)) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::dex::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

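// Collects the monitors owned by a thread: those locked by its Java frames (via
// Monitor::VisitLocks) and those acquired through JNI (visited as monitor roots). For each
// monitor the stack depth of the acquiring frame is recorded, or -1 if it is not associated
// with a Java frame.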
struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
  // We need a context because VisitLocks needs it to retrieve the monitor objects.
  explicit MonitorVisitor(art::Thread* thread)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      : art::StackVisitor(thread,
                          art::Context::Create(),
                          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        hs(art::Thread::Current()),
        current_stack_depth(0) {}

  ~MonitorVisitor() {
    delete context_;
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    if (!GetMethod()->IsRuntimeMethod()) {
      art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
      ++current_stack_depth;
    }
    return true;
  }

  static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
    art::ObjPtr<art::mirror::Object> mon(owned_monitor);
    // Filter out duplicates.
    for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
      if (monitor.Get() == mon.Ptr()) {
        return;
      }
    }
    visitor->monitors.push_back(visitor->hs.NewHandle(mon));
    visitor->stack_depths.push_back(visitor->current_stack_depth);
  }

  void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (const art::Handle<art::mirror::Object>& m : monitors) {
      if (m.Get() == obj) {
        return;
      }
    }
    monitors.push_back(hs.NewHandle(obj));
    stack_depths.push_back(-1);
  }

  art::VariableSizedHandleScope hs;
  jint current_stack_depth;
  std::vector<art::Handle<art::mirror::Object>> monitors;
  std::vector<jint> stack_depths;
};

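// Checkpoint closure that runs MonitorVisitor on the target thread and hands the results to a
// caller-supplied handler.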
template<typename Fn>
struct MonitorInfoClosure : public art::Closure {
 public:
  explicit MonitorInfoClosure(Fn handle_results)
      : err_(OK), handle_results_(handle_results) {}

  void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
    // Find the monitors on the stack.
    MonitorVisitor visitor(target);
    visitor.WalkStack(/* include_transitions */ false);
    // Find any other monitors, including ones acquired in native code.
    art::RootInfo root_info(art::kRootVMInternal);
    target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
    err_ = handle_results_(visitor);
  }

  jvmtiError GetError() {
    return err_;
  }

 private:
  jvmtiError err_;
  Fn handle_results_;
};


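// Shared logic for GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo. Runs the closure via
// a synchronous checkpoint for other threads; for the current thread it drops the thread list
// lock first and runs the closure directly (see the comment below).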
template <typename Fn>
static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
                                            jthread thread,
                                            Fn handle_results)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  MonitorInfoClosure<Fn> closure(handle_results);
  bool called_method = false;
  {
    art::Locks::thread_list_lock_->ExclusiveLock(self);
    art::Thread* target = nullptr;
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
      return err;
    }
    if (target != self) {
      called_method = true;
      // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
      // Since this deals with object references we need to avoid going to sleep.
      art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
      if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
        return ERR(THREAD_NOT_ALIVE);
      }
    } else {
      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
    }
  }
  // We cannot run the closure on the current thread while holding the thread_list_lock_, since
  // we need to call into the verifier, which can cause the current thread to suspend for GC.
  // Suspending would be a bad thing to do while holding the ThreadListLock. Other threads are
  // fine, since we run the closure on them via a checkpoint, but for the current thread we must
  // drop the mutex first.
  if (!called_method) {
    closure.Run(self);
  }
  return closure.GetError();
}

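// For reference, agent-side usage is roughly the following (illustrative sketch only). Each
// returned entry pairs a local reference to a monitor with the stack depth at which it was
// acquired, -1 meaning a monitor not attributable to a Java frame (e.g. JNI MonitorEnter):
//
//   jint info_cnt;
//   jvmtiMonitorStackDepthInfo* infos;
//   jvmtiError err = jvmti->GetOwnedMonitorStackDepthInfo(thread, &info_cnt, &infos);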
jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
                                                    jthread thread,
                                                    jint* info_cnt,
                                                    jvmtiMonitorStackDepthInfo** info_ptr) {
  if (info_cnt == nullptr || info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  std::vector<uint32_t> depths;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
      depths.push_back(visitor.stack_depths[i]);
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
  if (err != OK) {
    return err;
  }
  *info_cnt = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*info_ptr)[i] = {
      soa.AddLocalReference<jobject>(mons[i].Read()),
      static_cast<jint>(depths[i])
    };
  }
  return err;
}

jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                          jthread thread,
                                          jint* owned_monitor_count_ptr,
                                          jobject** owned_monitors_ptr) {
  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  art::ScopedObjectAccess soa(art::Thread::Current());
  std::vector<art::GcRoot<art::mirror::Object>> mons;
  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
    for (size_t i = 0; i < visitor.monitors.size(); i++) {
      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
    }
    return OK;
  };
  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
  if (err != OK) {
    return err;
  }
  auto nbytes = sizeof(jobject) * mons.size();
  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
  if (err != OK) {
    return err;
  }
  *owned_monitor_count_ptr = mons.size();
  for (uint32_t i = 0; i < mons.size(); i++) {
    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
  }
  return err;
}

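// Requests a FramePop event for the frame at the given depth, per the JVMTI NotifyFramePop
// spec. The target thread must be suspended (unless it is the current thread). The frame is
// forced into the interpreter via a debugger shadow frame so the pop can be observed.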
jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
  art::Thread* self = art::Thread::Current();
  art::Thread* target;
  do {
    ThreadUtil::SuspendCheck(self);
    art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
    // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
    // user-code suspension. We retry and do another SuspendCheck to clear this.
    if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
      continue;
    }
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
    // have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    }
    if (target != self) {
      // TODO This is part of the spec but we could easily avoid needing to do it. We would just put
      // all the logic into a sync-checkpoint.
      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
      if (target->GetUserCodeSuspendCount() == 0) {
        return ERR(THREAD_NOT_SUSPENDED);
      }
    }
    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
    // done (unless it's 'self' in which case we don't care since we aren't going to be returning).
    // TODO We could implement this using a synchronous checkpoint and not bother with any of the
    // suspension stuff. The spec does specifically say to return THREAD_NOT_SUSPENDED though.
    // Find the requested stack frame.
    std::unique_ptr<art::Context> context(art::Context::Create());
    FindFrameAtDepthVisitor visitor(target, context.get(), depth);
    visitor.WalkStack();
    if (!visitor.FoundFrame()) {
      return ERR(NO_MORE_FRAMES);
    }
    art::ArtMethod* method = visitor.GetMethod();
    if (method->IsNative()) {
      return ERR(OPAQUE_FRAME);
    }
    // From here we are sure to succeed.
    bool needs_instrument = false;
    // Get/create a shadow frame
    art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
    if (shadow_frame == nullptr) {
      needs_instrument = true;
      const size_t frame_id = visitor.GetFrameId();
      const uint16_t num_regs = method->DexInstructionData().RegistersSize();
      shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
                                                             num_regs,
                                                             method,
                                                             visitor.GetDexPc());
    }
    {
      art::WriterMutexLock lk(self, tienv->event_info_mutex_);
      // Mark shadow frame as needs_notify_pop_
      shadow_frame->SetNotifyPop(true);
      tienv->notify_frames.insert(shadow_frame);
    }
    // Make sure we will go to the interpreter and use the shadow frames.
    if (needs_instrument) {
      art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
    }
    return OK;
  } while (true);
}

}  // namespace openjdkjvmti