/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "ti_stack.h"

#include <algorithm>
#include <list>
#include <unordered_map>
#include <vector>

#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "barrier.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "nativehelper/ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace openjdkjvmti {

template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }

    if (start == 0) {
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
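        // (stop == 1 means this was the last requested frame. A stop value of 0
        //  denotes "no limit" and is never counted down, see the branch below.)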
      } else if (stop > 0) {
        stop--;
      }
    } else {
      start--;
    }

    return true;
  }

  FrameFn fn;
  size_t start;
  size_t stop;
};

template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
                                                    size_t start,
                                                    size_t stop,
                                                    FrameFn fn) {
  return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
}

struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};

static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
                                       jint start_depth,
                                       size_t start_result,
                                       jint max_frame_count,
                                       jvmtiFrameInfo* frame_buffer,
                                       jint* count_ptr) {
  size_t collected_frames = frames.size();

  // Assume we're here having collected something.
  DCHECK_GT(max_frame_count, 0);

  // Frames from the top.
  if (start_depth >= 0) {
    if (start_result != 0) {
      // Not enough frames.
      return ERR(ILLEGAL_ARGUMENT);
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
    if (frames.size() > 0) {
      memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
    }
    *count_ptr = static_cast<jint>(frames.size());
    return ERR(NONE);
  }

  // Frames from the bottom.
  if (collected_frames < static_cast<size_t>(-start_depth)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
  memcpy(frame_buffer,
         &frames.data()[collected_frames + start_depth],
         count * sizeof(jvmtiFrameInfo));
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

struct GetStackTraceDirectClosure : public art::Closure {
 public:
  GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
      : frame_buffer(frame_buffer_),
        start_input(start),
        stop_input(stop),
        index(0) {
    DCHECK_GE(start_input, 0u);
  }

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frame_buffer[index] = info;
      ++index;
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  jvmtiFrameInfo* frame_buffer;

  const size_t start_input;
  const size_t stop_input;

  size_t index = 0;
};

static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {   // Needed for FromManagedThread.
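  // Per the JVMTI spec, a null jthread denotes the current thread; otherwise the
  // peer must be a java.lang.Thread instance with a live native counterpart. The
  // two branches below map the failure modes onto the matching jvmtiError codes.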
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}

jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}

template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
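    // Frames are appended to the per-thread vector handed out above; the storage
    // was registered under data->mutex, so the walk itself needs no extra locking.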
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};

template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}

jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
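    // For illustration, the single chunk assembled in the second stage is laid
    // out as
    //   [jvmtiStackInfo x threads | padding | jvmtiFrameInfo x sum_frames]
    // with every frame_buffer pointing into the trailing jvmtiFrameInfo region.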
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}

jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
                                               jint thread_count,
                                               const jthread* thread_list,
                                               jint max_frame_count,
                                               jvmtiStackInfo** stack_info_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (thread_count == 0) {
    *stack_info_ptr = nullptr;
    return ERR(NONE);
  }
  if (thread_list == nullptr || stack_info_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);  // Now we know we have the shared lock.
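  // The strategy below mirrors GetAllStackTraces: run a checkpoint over all
  // threads, but hand out frame storage only to threads whose peer appears in
  // the caller-supplied list, recording each matching request index so that the
  // output can be reassembled in list order afterwards.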

  struct SelectStackTracesData {
    SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}

    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
      for (size_t index = 0; index != handles.size(); ++index) {
        if (peer == handles[index].Get()) {
          // Found the thread.
          art::MutexLock mu(self, mutex);

          threads.push_back(thread);
          thread_list_indices.push_back(index);

          frames.emplace_back(new std::vector<jvmtiFrameInfo>());
          return frames.back().get();
        }
      }
      return nullptr;
    }

    art::Mutex mutex;

    // Selection data.

    std::vector<art::Handle<art::mirror::Object>> handles;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    std::vector<size_t> thread_list_indices;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  SelectStackTracesData data;

  // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
  art::VariableSizedHandleScope hs(current);
  for (jint i = 0; i != thread_count; ++i) {
    if (thread_list[i] == nullptr) {
      return ERR(INVALID_THREAD);
    }
    if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }
    data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
  }

  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = data.threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
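    // The fix-up loop further below resolves each request slot to a local ref,
    // and reports threads that never matched a native thread as NEW or
    // TERMINATED based on the Java-side "started" field.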
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer. Note: the buffer is sized by
  // thread_count, which may exceed data.frames.size() when some requested threads never ran.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);

  for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
    // Check whether we found a running thread for this.
    // Note: For simplicity, and with the expectation that the list is usually small, use a simple
    //       search. (The list is *not* sorted!)
    auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
    if (it == data.thread_list_indices.end()) {
      // No native thread. Must be new or dead. We need to fill out the stack info now.
      // (Need to read the Java "started" field to know whether this is starting or terminated.)
      art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
      art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
      art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
      CHECK(started_field != nullptr);
      bool started = started_field->GetBoolean(peer) != 0;
      constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
      constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
                                        JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
      stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      stack_info[i].state = started ? kTerminatedState : kStartedState;
      stack_info[i].frame_count = 0;
      stack_info[i].frame_buffer = nullptr;
    } else {
      // Had a native thread and frames.
      size_t f_index = it - data.thread_list_indices.begin();

      jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
      jvmtiStackInfo& new_stack_info = stack_info[i];

      memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
      new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
      if (old_stack_info.frame_count > 0) {
        // Only copy when there's data - leave the nullptr alone.
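        // frame_info advances through the shared chunk, so each thread's frames
        // land contiguously after those of the previous matched thread.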
        size_t frames_size =
            static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
        memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
        new_stack_info.frame_buffer = frame_info;
        frame_info += old_stack_info.frame_count;
      }
    }
  }

  *stack_info_ptr = stack_info;

  return ERR(NONE);
}

// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
// runtime methods and transitions must not be counted.
struct GetFrameCountVisitor : public art::StackVisitor {
  explicit GetFrameCountVisitor(art::Thread* thread)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        count(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      count++;
    }
    return true;
  }

  size_t count;
};

struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};

jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}

// Walks up the stack 'n' callers, when used with Thread::WalkStack.
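// Depth 0 is the topmost non-runtime Java frame, matching the frame numbering
// used by GetFrameCountVisitor above and the depth semantics of GetFrameLocation.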
struct GetLocationVisitor : public art::StackVisitor {
  GetLocationVisitor(art::Thread* thread, size_t n_in)
      : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        n(n_in),
        count(0),
        caller(nullptr),
        caller_dex_pc(0) {}

  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
    if (do_count) {
      DCHECK(caller == nullptr);
      if (count == n) {
        caller = m;
        caller_dex_pc = GetDexPc(false);
        return false;
      }
      count++;
    }
    return true;
  }

  const size_t n;
  size_t count;
  art::ArtMethod* caller;
  uint32_t caller_dex_pc;
};

struct GetLocationClosure : public art::Closure {
 public:
  explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}

  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetLocationVisitor visitor(self, n);
    visitor.WalkStack(false);

    method = visitor.caller;
    dex_pc = visitor.caller_dex_pc;
  }

  const size_t n;
  art::ArtMethod* method;
  uint32_t dex_pc;
};

jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                       jthread java_thread,
                                       jint depth,
                                       jmethodID* method_ptr,
                                       jlocation* location_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (depth < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (method_ptr == nullptr || location_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetLocationClosure closure(static_cast<size_t>(depth));
  thread->RequestSynchronousCheckpoint(&closure);

  if (closure.method == nullptr) {
    return ERR(NO_MORE_FRAMES);
  }

  *method_ptr = art::jni::EncodeArtMethod(closure.method);
  if (closure.method->IsNative()) {
    *location_ptr = -1;
  } else {
    if (closure.dex_pc == art::DexFile::kDexNoIndex) {
      return ERR(INTERNAL);
    }
    *location_ptr = static_cast<jlocation>(closure.dex_pc);
  }

  return ERR(NONE);
}

}  // namespace openjdkjvmti
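
// A minimal agent-side sketch of how the entry points above are typically
// reached (illustrative only; it assumes "jvmti" is a live jvmtiEnv* obtained
// via JavaVM::GetEnv during Agent_OnLoad):
//
//   jvmtiFrameInfo frames[16];
//   jint count = 0;
//   // A null jthread selects the current thread; start_depth 0 is the top frame.
//   jvmtiError err = jvmti->GetStackTrace(nullptr, 0, 16, frames, &count);
//   if (err == JVMTI_ERROR_NONE && count > 0) {
//     char* name = nullptr;
//     jvmti->GetMethodName(frames[0].method, &name, nullptr, nullptr);
//     // frames[0].location holds the dex pc, or -1 for native methods.
//   }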