/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <functional>
#include <memory>
#include <set>
#include <vector>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/safe_map.h"
#include "base/strlcpy.h"
#include "base/time_utils.h"
#include "class_linker-inl.h"
#include "class_linker.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction.h"
#include "dex/utf.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_primitive_array.h"
#include "oat_file.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
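// For example (illustrative values, not from a real session): a recorder holding 70,000 entries
// would be reported as 0xffff (65,535), since the reply field is only two bytes wide.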
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}

class Breakpoint : public ValueObject {
 public:
  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
      : method_(method->GetCanonicalMethod(kRuntimePointerSize)),
        dex_pc_(dex_pc),
        deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
  }

  Breakpoint(const Breakpoint& other) REQUIRES_SHARED(Locks::mutator_lock_)
      : method_(other.method_),
        dex_pc_(other.dex_pc_),
        deoptimization_kind_(other.deoptimization_kind_) {}

  // Method() is called from root visiting, do not use ScopedObjectAccess here or it can cause
  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
  ArtMethod* Method() const {
    return method_;
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

  // Returns true if the method of this breakpoint and the passed in method should be considered the
  // same. That is, they are either the same method or they are copied from the same method.
  bool IsInMethod(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_) {
    return method_ == m->GetCanonicalMethod(kRuntimePointerSize);
  }

 private:
  // The location of this breakpoint.
  ArtMethod* method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", ArtMethod::PrettyMethod(rhs.Method()).c_str(),
                     rhs.DexPc());
  return os;
}

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    if (IsListeningToDexPcMoved()) {
      // We also listen to the kDexPcMoved instrumentation event, so we know the DexPcMoved method
      // is going to be called right after us. To avoid sending JDWP events twice for this
      // location, we report the event in DexPcMoved. However, we must remember this is a method
      // entry so we send the METHOD_ENTRY event. And we can also group it with other events for
      // this location like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN
      // instruction).
      thread->SetDebugMethodEntry();
    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is
      // a RETURN, so we know the MethodExited method is going to be called right after us. To avoid
      // sending JDWP events twice for this location, we report the event(s) in MethodExited.
      // However, we must remember this is a method entry so we send the METHOD_ENTRY event. And
      // we can also group it with other events for this location like BREAKPOINT or SINGLE_STEP.
      thread->SetDebugMethodEntry();
    } else {
      Dbg::UpdateDebugger(thread, this_object.Get(), method, 0, Dbg::kMethodEntry, nullptr);
    }
  }

  void MethodExited(Thread* thread,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc,
                    const JValue& return_value)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    uint32_t events = Dbg::kMethodExit;
    if (thread->IsDebugMethodEntry()) {
      // It is also the method entry.
      DCHECK(IsReturn(method, dex_pc));
      events |= Dbg::kMethodEntry;
      thread->ClearDebugMethodEntry();
    }
    Dbg::UpdateDebugger(thread, this_object.Get(), method, dex_pc, events, &return_value);
  }

  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
                    Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                    ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
      // We also listen to the kMethodExited instrumentation event, and the current instruction is
      // a RETURN, so we know the MethodExited method is going to be called right after us. Like in
      // MethodEntered, we delegate event reporting to MethodExited.
      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
      // Therefore, we must not clear the debug method entry flag here.
    } else {
      uint32_t events = 0;
      if (thread->IsDebugMethodEntry()) {
        // It is also the method entry.
        events = Dbg::kMethodEntry;
        thread->ClearDebugMethodEntry();
      }
      Dbg::UpdateDebugger(thread, this_object.Get(), method, new_dex_pc, events, nullptr);
    }
  }

  void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
                 Handle<mirror::Object> this_object,
                 ArtMethod* method,
                 uint32_t dex_pc,
                 ArtField* field)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc,
                    ArtField* field,
                    const JValue& field_value)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
  }

  void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                       Handle<mirror::Throwable> exception_object)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostException(exception_object.Get());
  }

  // We only care about branches in the Jit.
  void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc << ", " << dex_pc_offset;
  }

  // We only care about invokes in the Jit.
  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
                                Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                                ArtMethod* method,
                                uint32_t dex_pc,
                                ArtMethod* target ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc;
  }

  // TODO: It might be worth it to post an ExceptionCatch event here.
  void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(ERROR) << "Unexpected exception handled event in debugger";
  }

  // TODO: It might be worth it to implement this.
  void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
                       const ShadowFrame& frame ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
  }

 private:
  static bool IsReturn(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
    return method->DexInstructions().InstructionAt(dex_pc).IsReturn();
  }

  static bool IsListeningToDexPcMoved() REQUIRES_SHARED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
  }

  static bool IsListeningToMethodExit() REQUIRES_SHARED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
  }

  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return (Dbg::GetInstrumentationEvents() & event) != 0;
  }

  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;
DebuggerActiveMethodInspectionCallback Dbg::gDebugActiveCallback;
DebuggerDdmCallback Dbg::gDebugDdmCallback;
InternalDebuggerControlCallback Dbg::gDebuggerControlCallback;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;

// Instrumentation event reference counters.
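// Each counter tracks how many active JDWP event requests need the corresponding instrumentation
// event, so the matching listener can be registered on the first request and removed again only
// once the last request is cleared.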
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;
Dbg::DbgClassLoadCallback Dbg::class_load_callback_;

void DebuggerDdmCallback::DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data) {
  if (gJdwpState == nullptr) {
    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
  } else {
    iovec vec[1];
    vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(data.data()));
    vec[0].iov_len = data.size();
    gJdwpState->DdmSendChunkV(type, vec, 1);
  }
}

bool DebuggerActiveMethodInspectionCallback::IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED) {
  return Dbg::IsDebuggerActive();
}

bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
  return !Dbg::MethodHasAnyBreakpoints(m);
}

bool DebuggerActiveMethodInspectionCallback::MethodNeedsDebugVersion(
    ArtMethod* m ATTRIBUTE_UNUSED) {
  return Dbg::IsDebuggerActive();
}

void InternalDebuggerControlCallback::StartDebugger() {
  // Release the mutator lock.
  ScopedThreadStateChange stsc(art::Thread::Current(), kNative);
  Dbg::StartJdwp();
}

void InternalDebuggerControlCallback::StopDebugger() {
  Dbg::StopJdwp();
}

bool InternalDebuggerControlCallback::IsDebuggerConfigured() {
  return Dbg::IsJdwpConfigured();
}

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
  klass.VisitRoot(visitor, root_info);
}

void SingleStepControl::AddDexPc(uint32_t dex_pc) {
  dex_pcs_.insert(dex_pc);
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
}

static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
    REQUIRES(!Locks::breakpoint_lock_)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    REQUIRES(!Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  ObjPtr<mirror::Class> java_lang_Thread =
      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an un-started
  // thread or a zombie.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
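  // For example: "I" yields JT_INT ('I'), "[I" yields JT_ARRAY ('['), and any "L...;" class
  // descriptor yields JT_OBJECT ('L').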
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    ObjPtr<mirror::Class> thread_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    ObjPtr<mirror::Class> thread_group_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    ObjPtr<mirror::Class> class_loader_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type. The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
    case JDWP::JT_BOOLEAN:
    case JDWP::JT_BYTE:
    case JDWP::JT_CHAR:
    case JDWP::JT_FLOAT:
    case JDWP::JT_DOUBLE:
    case JDWP::JT_INT:
    case JDWP::JT_LONG:
    case JDWP::JT_SHORT:
    case JDWP::JT_VOID:
      return true;
    default:
      return false;
  }
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  {
    // Set up the DDM listener.
    ScopedObjectAccess soa(Thread::Current());
    Runtime::Current()->GetRuntimeCallbacks()->AddDdmCallback(&gDebugDdmCallback);
  }

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Dispose();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

bool Dbg::IsJdwpAllowed() {
  return gJdwpAllowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ReleaseJdwpTokenForEvent();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (IsDebuggerActive()) {
    return;
  }

  Thread* const self = Thread::Current();
  {
    // TODO: Dalvik only warned if there were breakpoints left over. Clear in Dbg::Disconnected?
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  // Best-effort deoptimization if the runtime is non-Java debuggable. This happens when
  // ro.debuggable is set, but the application is not debuggable, or when a standalone
  // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
  //
  // The performance cost of this is non-negligible during native debugging due to the
  // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
  if (!runtime->IsJavaDebuggable() &&
      !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
      !runtime->IsNativeDebuggable()) {
    runtime->DeoptimizeBootImage();
  }

  ScopedSuspendAll ssa(__FUNCTION__);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  Thread* self = Thread::Current();
  {
    // Required for DisableDeoptimization.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    // Debugger may not be active at this point.
    if (IsDebuggerActive()) {
      {
        // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
        // This prevents us from having any pending deoptimization request when the debugger
        // attaches to us again while no event has been requested yet.
        MutexLock mu(self, *Locks::deoptimization_lock_);
        deoptimization_requests_.clear();
        full_deoptimization_event_count_ = 0U;
      }
      if (instrumentation_events_ != 0) {
        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                      instrumentation_events_);
        instrumentation_events_ = 0;
      }
      if (RequiresDeoptimization()) {
        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
      }
      gDebuggerActive = false;
      Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
          &gDebugActiveCallback);
    }
  }

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
  gJdwpOptions = jdwp_options;
  gJdwpConfigured = true;
  Runtime::Current()->GetRuntimeCallbacks()->AddDebuggerControlCallback(&gDebuggerControlCallback);
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "null";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "null";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, and only classes (not interfaces) are
  // supposed to have it set. Class.getModifiers doesn't return it, but JDWP does, so we set it
  // here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}
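// Worked example for GetModifiers (illustration): a public, non-interface class whose dex access
// flags are just ACC_PUBLIC (0x0001) is reported to the debugger as 0x0021
// (ACC_PUBLIC | ACC_SUPER).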
JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);

  MonitorInfo monitor_info;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    ScopedSuspendAll ssa(__FUNCTION__);
    monitor_info = MonitorInfo(o);
  }
  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeerFromOtherThread()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeerFromOtherThread()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        REQUIRES_SHARED(Locks::mutator_lock_)
        : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          current_stack_depth(0),
          monitors(monitor_vector),
          stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Class>> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    ObjPtr<mirror::Class> c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(hs.NewHandle(c));
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
  JDWP::JdwpError error;
  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Object>> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(hs,
                                              hs.NewHandle(c),
                                              /* use_is_assignable_from */ false,
                                              max_count,
                                              raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i].Get()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger);
  JDWP::JdwpError error;
  ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Object>> raw_instances;
  heap->GetReferringObjects(hs, hs.NewHandle(o), max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i].Get()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states an INVALID_OBJECT error is returned if the object ID is not valid.
  // However, the RI seems to ignore this and assumes the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(ObjPtr<mirror::Class> klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

// Get the complete list of reference classes (i.e. all classes except
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
class ClassListCreator : public ClassVisitor {
 public:
  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}

  bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!c->IsPrimitive()) {
      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
    }
    return true;
  }

 private:
  std::vector<JDWP::RefTypeId>* const classes_;
};

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<ObjPtr<mirror::Class>> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (ObjPtr<mirror::Class> c : classes) {
    ids->push_back(gRegistry->Add(c));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceDebugExtension(JDWP::RefTypeId class_id,
                                             std::string* extension_data) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> klass(hs.NewHandle(c));
  const char* data = annotations::GetSourceDebugExtension(klass);
  if (data == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *extension_data = data;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
    case JDWP::JT_VOID:
      return 0;
    case JDWP::JT_BYTE:
    case JDWP::JT_BOOLEAN:
      return 1;
    case JDWP::JT_CHAR:
    case JDWP::JT_SHORT:
      return 2;
    case JDWP::JT_FLOAT:
    case JDWP::JT_INT:
      return 4;
    case JDWP::JT_ARRAY:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
      return sizeof(JDWP::ObjectId);
    case JDWP::JT_DOUBLE:
    case JDWP::JT_LONG:
      return 8;
    default:
      LOG(FATAL) << "Unknown tag " << tag;
      return -1;
  }
}
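// For example (illustration of GetTagWidth): elements of a long[] ("[J") are serialized as
// 8 bytes each, while elements of any object array are written as ObjectIds, which are also
// 8 bytes wide in this JDWP implementation.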
JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count,
                                 JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
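  // Reads `count` elements of sizeof(T) bytes each from the JDWP request payload and stores them
  // into the array's raw data, starting at element `offset`.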
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      // Check if the object's type is compatible with the array's type.
      if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
        return JDWP::ERR_TYPE_MISMATCH;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
  Thread* self = Thread::Current();
  mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
  if (new_string == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate string";
    *new_string_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_string_id = gRegistry->Add(new_string);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  ObjPtr<mirror::Object> new_object;
  if (c->IsStringClass()) {
    // Special case for java.lang.String.
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    new_object = mirror::String::AllocEmptyString<true>(self, allocator_type);
  } else {
    new_object = c->AllocObject(self);
  }
  if (new_object == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate object of type " << mirror::Class::PrettyDescriptor(c);
    *new_object_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_object_id = gRegistry->Add(new_object.Ptr());
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
                                                        c->GetComponentSizeShift(),
                                                        heap->GetCurrentAllocator());
  if (new_array == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate array of type " << mirror::Class::PrettyDescriptor(c);
    *new_array_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_array_id = gRegistry->Add(new_array);
  return JDWP::ERR_NONE;
}

JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return static_cast<JDWP::MethodId>(
      reinterpret_cast<uintptr_t>(m->GetCanonicalMethod(kRuntimePointerSize)));
}

static ArtField* FromFieldId(JDWP::FieldId fid)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}

static ArtMethod* FromMethodId(JDWP::MethodId mid)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}

bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
  CHECK(event_thread != nullptr);
  JDWP::JdwpError error;
  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
      expected_thread_id, &error);
  return expected_thread_peer == event_thread->GetPeerFromOtherThread();
}

bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
                        const JDWP::EventLocation& event_location) {
  if (expected_location.dex_pc != event_location.dex_pc) {
    return false;
  }
  ArtMethod* m = FromMethodId(expected_location.method_id);
  return m == event_location.method;
}

bool Dbg::MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id) {
  if (event_class == nullptr) {
    return false;
  }
  JDWP::JdwpError error;
  ObjPtr<mirror::Class> expected_class = DecodeClass(class_id, &error);
  CHECK(expected_class != nullptr);
  return expected_class->IsAssignableFrom(event_class);
}

bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                     ArtField* event_field) {
  ArtField* expected_field = FromFieldId(expected_field_id);
  if (expected_field != event_field) {
    return false;
  }
  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
}

bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
  JDWP::JdwpError error;
  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id,
                                                                      &error);
  return modifier_instance == event_instance;
}

void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location->type_tag = GetTypeTag(c);
    location->class_id = gRegistry->AddRefType(c);
    // The RI seems to return 0 for all obsolete methods. For compatibility we shall do the same.
    location->method_id = m->IsObsolete() ? 0 : ToMethodId(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return "null";
  }
  return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}

bool Dbg::IsMethodObsolete(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    // NB: Since we return 0 as the MethodId for obsolete methods, we want to default to true here.
    return true;
  }
  return m->IsObsolete();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
  ArtField* f = FromFieldId(field_id);
  if (f == nullptr) {
    return "null";
  }
  return f->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit". Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
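// Worked example for MangleAccessFlags (illustration): a synthetic member with dex flags
// ACC_PUBLIC | ACC_SYNTHETIC (0x1001) is reported as 0xf0001001; a plain ACC_PUBLIC member
// (0x0001) passes through unchanged.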
/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CodeItemDataAccessor accessor(m->DexInstructionData());
  if (!accessor.HasCodeItem()) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << m->PrettyMethod();
    return slot;
  }
  uint16_t ins_size = accessor.InsSize();
  uint16_t locals_size = accessor.RegistersSize() - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}
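// Worked example (illustration): for a method with registers_size == 5 and ins_size == 2, the
// dex registers are locals v0..v2 followed by arguments v3..v4 (locals_size == 3). MangleSlot
// maps v3,v4 to slots 0,1 and v0..v2 to slots 2..4; DemangleSlot below performs the inverse
// mapping.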
1577 LOG(WARNING) << "Trying to demangle slot for method without code " 1578 << m->PrettyMethod(); 1579 uint16_t vreg_count = GetMethodNumArgRegistersIncludingThis(m); 1580 if (slot < vreg_count) { 1581 *error = JDWP::ERR_NONE; 1582 return slot; 1583 } 1584 } else { 1585 if (slot < accessor.RegistersSize()) { 1586 uint16_t ins_size = accessor.InsSize(); 1587 uint16_t locals_size = accessor.RegistersSize() - ins_size; 1588 *error = JDWP::ERR_NONE; 1589 return (slot < ins_size) ? slot + locals_size : slot - ins_size; 1590 } 1591 } 1592 1593 // Slot is invalid in the method. 1594 LOG(ERROR) << "Invalid local slot " << slot << " for method " << m->PrettyMethod(); 1595 *error = JDWP::ERR_INVALID_SLOT; 1596 return DexFile::kDexNoIndex16; 1597 } 1598 1599 JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, 1600 JDWP::ExpandBuf* pReply) { 1601 JDWP::JdwpError error; 1602 mirror::Class* c = DecodeClass(class_id, &error); 1603 if (c == nullptr) { 1604 return error; 1605 } 1606 1607 size_t instance_field_count = c->NumInstanceFields(); 1608 size_t static_field_count = c->NumStaticFields(); 1609 1610 expandBufAdd4BE(pReply, instance_field_count + static_field_count); 1611 1612 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { 1613 ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : 1614 c->GetStaticField(i - instance_field_count); 1615 expandBufAddFieldId(pReply, ToFieldId(f)); 1616 expandBufAddUtf8String(pReply, f->GetName()); 1617 expandBufAddUtf8String(pReply, f->GetTypeDescriptor()); 1618 if (with_generic) { 1619 static const char genericSignature[1] = ""; 1620 expandBufAddUtf8String(pReply, genericSignature); 1621 } 1622 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags())); 1623 } 1624 return JDWP::ERR_NONE; 1625 } 1626 1627 JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic, 1628 JDWP::ExpandBuf* pReply) { 1629 JDWP::JdwpError error; 1630 mirror::Class* c = DecodeClass(class_id, &error); 1631 if (c == nullptr) { 1632 return error; 1633 } 1634 1635 expandBufAdd4BE(pReply, c->NumMethods()); 1636 1637 auto* cl = Runtime::Current()->GetClassLinker(); 1638 auto ptr_size = cl->GetImagePointerSize(); 1639 for (ArtMethod& m : c->GetMethods(ptr_size)) { 1640 expandBufAddMethodId(pReply, ToMethodId(&m)); 1641 expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName()); 1642 expandBufAddUtf8String( 1643 pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetSignature().ToString()); 1644 if (with_generic) { 1645 const char* generic_signature = ""; 1646 expandBufAddUtf8String(pReply, generic_signature); 1647 } 1648 expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags())); 1649 } 1650 return JDWP::ERR_NONE; 1651 } 1652 1653 JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { 1654 JDWP::JdwpError error; 1655 Thread* self = Thread::Current(); 1656 ObjPtr<mirror::Class> c = DecodeClass(class_id, &error); 1657 if (c == nullptr) { 1658 return error; 1659 } 1660 size_t interface_count = c->NumDirectInterfaces(); 1661 expandBufAdd4BE(pReply, interface_count); 1662 for (size_t i = 0; i < interface_count; ++i) { 1663 ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, c, i); 1664 DCHECK(interface != nullptr); 1665 expandBufAddRefTypeId(pReply, gRegistry->AddRefType(interface)); 1666 } 1667 return JDWP::ERR_NONE; 1668 } 1669 1670 void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId 
method_id, JDWP::ExpandBuf* pReply) { 1671 struct DebugCallbackContext { 1672 int numItems; 1673 JDWP::ExpandBuf* pReply; 1674 1675 static bool Callback(void* context, const DexFile::PositionInfo& entry) { 1676 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1677 expandBufAdd8BE(pContext->pReply, entry.address_); 1678 expandBufAdd4BE(pContext->pReply, entry.line_); 1679 pContext->numItems++; 1680 return false; 1681 } 1682 }; 1683 ArtMethod* m = FromMethodId(method_id); 1684 CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo()); 1685 uint64_t start, end; 1686 if (!accessor.HasCodeItem()) { 1687 DCHECK(m->IsNative() || m->IsProxyMethod()); 1688 start = -1; 1689 end = -1; 1690 } else { 1691 start = 0; 1692 // Return the index of the last instruction 1693 end = accessor.InsnsSizeInCodeUnits() - 1; 1694 } 1695 1696 expandBufAdd8BE(pReply, start); 1697 expandBufAdd8BE(pReply, end); 1698 1699 // Add numLines later 1700 size_t numLinesOffset = expandBufGetLength(pReply); 1701 expandBufAdd4BE(pReply, 0); 1702 1703 DebugCallbackContext context; 1704 context.numItems = 0; 1705 context.pReply = pReply; 1706 1707 if (accessor.HasCodeItem()) { 1708 m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), 1709 DebugCallbackContext::Callback, 1710 &context); 1711 } 1712 1713 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems); 1714 } 1715 1716 void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, 1717 JDWP::ExpandBuf* pReply) { 1718 struct DebugCallbackContext { 1719 ArtMethod* method; 1720 JDWP::ExpandBuf* pReply; 1721 size_t variable_count; 1722 bool with_generic; 1723 1724 static void Callback(void* context, const DexFile::LocalInfo& entry) 1725 REQUIRES_SHARED(Locks::mutator_lock_) { 1726 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1727 1728 uint16_t slot = entry.reg_; 1729 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", 1730 pContext->variable_count, entry.start_address_, 1731 entry.end_address_ - entry.start_address_, 1732 entry.name_, entry.descriptor_, entry.signature_, slot, 1733 MangleSlot(slot, pContext->method)); 1734 1735 slot = MangleSlot(slot, pContext->method); 1736 1737 expandBufAdd8BE(pContext->pReply, entry.start_address_); 1738 expandBufAddUtf8String(pContext->pReply, entry.name_); 1739 expandBufAddUtf8String(pContext->pReply, entry.descriptor_); 1740 if (pContext->with_generic) { 1741 expandBufAddUtf8String(pContext->pReply, entry.signature_); 1742 } 1743 expandBufAdd4BE(pContext->pReply, entry.end_address_- entry.start_address_); 1744 expandBufAdd4BE(pContext->pReply, slot); 1745 1746 ++pContext->variable_count; 1747 } 1748 }; 1749 ArtMethod* m = FromMethodId(method_id); 1750 CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo()); 1751 1752 // arg_count considers doubles and longs to take 2 units. 1753 // variable_count considers everything to take 1 unit. 1754 expandBufAdd4BE(pReply, GetMethodNumArgRegistersIncludingThis(m)); 1755 1756 // We don't know the total number of variables yet, so leave a blank and update it later. 
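// This mirrors the numLines placeholder in OutputLineTable above: remember the
// buffer offset, write a zero now, then patch the real count in with Set4BE
// once the debug-info walk below has finished.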
1757 size_t variable_count_offset = expandBufGetLength(pReply); 1758 expandBufAdd4BE(pReply, 0); 1759 1760 DebugCallbackContext context; 1761 context.method = m; 1762 context.pReply = pReply; 1763 context.variable_count = 0; 1764 context.with_generic = with_generic; 1765 1766 if (accessor.HasCodeItem()) { 1767 m->GetDexFile()->DecodeDebugLocalInfo(accessor.RegistersSize(), 1768 accessor.InsSize(), 1769 accessor.InsnsSizeInCodeUnits(), 1770 accessor.DebugInfoOffset(), 1771 m->IsStatic(), 1772 m->GetDexMethodIndex(), 1773 DebugCallbackContext::Callback, 1774 &context); 1775 } 1776 1777 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count); 1778 } 1779 1780 void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, 1781 JDWP::ExpandBuf* pReply) { 1782 ArtMethod* m = FromMethodId(method_id); 1783 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty()); 1784 OutputJValue(tag, return_value, pReply); 1785 } 1786 1787 void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, 1788 JDWP::ExpandBuf* pReply) { 1789 ArtField* f = FromFieldId(field_id); 1790 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); 1791 OutputJValue(tag, field_value, pReply); 1792 } 1793 1794 JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id, 1795 std::vector<uint8_t>* bytecodes) { 1796 ArtMethod* m = FromMethodId(method_id); 1797 if (m == nullptr) { 1798 return JDWP::ERR_INVALID_METHODID; 1799 } 1800 CodeItemDataAccessor accessor(m->DexInstructionData()); 1801 size_t byte_count = accessor.InsnsSizeInCodeUnits() * 2; 1802 const uint8_t* begin = reinterpret_cast<const uint8_t*>(accessor.Insns()); 1803 const uint8_t* end = begin + byte_count; 1804 for (const uint8_t* p = begin; p != end; ++p) { 1805 bytecodes->push_back(*p); 1806 } 1807 return JDWP::ERR_NONE; 1808 } 1809 1810 JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) { 1811 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1812 } 1813 1814 JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) { 1815 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1816 } 1817 1818 static JValue GetArtFieldValue(ArtField* f, mirror::Object* o) 1819 REQUIRES_SHARED(Locks::mutator_lock_) { 1820 Primitive::Type fieldType = f->GetTypeAsPrimitiveType(); 1821 JValue field_value; 1822 switch (fieldType) { 1823 case Primitive::kPrimBoolean: 1824 field_value.SetZ(f->GetBoolean(o)); 1825 return field_value; 1826 1827 case Primitive::kPrimByte: 1828 field_value.SetB(f->GetByte(o)); 1829 return field_value; 1830 1831 case Primitive::kPrimChar: 1832 field_value.SetC(f->GetChar(o)); 1833 return field_value; 1834 1835 case Primitive::kPrimShort: 1836 field_value.SetS(f->GetShort(o)); 1837 return field_value; 1838 1839 case Primitive::kPrimInt: 1840 case Primitive::kPrimFloat: 1841 // Int and Float must be treated as 32-bit values in JDWP. 1842 field_value.SetI(f->GetInt(o)); 1843 return field_value; 1844 1845 case Primitive::kPrimLong: 1846 case Primitive::kPrimDouble: 1847 // Long and Double must be treated as 64-bit values in JDWP. 
1848 field_value.SetJ(f->GetLong(o));
1849 return field_value;
1850
1851 case Primitive::kPrimNot:
1852 field_value.SetL(f->GetObject(o).Ptr());
1853 return field_value;
1854
1855 case Primitive::kPrimVoid:
1856 LOG(FATAL) << "Attempt to read from field of type 'void'";
1857 UNREACHABLE();
1858 }
1859 LOG(FATAL) << "Attempt to read from field of unknown type";
1860 UNREACHABLE();
1861 }
1862
1863 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1864 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1865 bool is_static)
1866 REQUIRES_SHARED(Locks::mutator_lock_) {
1867 JDWP::JdwpError error;
1868 mirror::Class* c = DecodeClass(ref_type_id, &error);
1869 if (ref_type_id != 0 && c == nullptr) {
1870 return error;
1871 }
1872
1873 Thread* self = Thread::Current();
1874 StackHandleScope<2> hs(self);
1875 MutableHandle<mirror::Object>
1876 o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
1877 if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
1878 return JDWP::ERR_INVALID_OBJECT;
1879 }
1880 ArtField* f = FromFieldId(field_id);
1881
1882 mirror::Class* receiver_class = c;
1883 if (receiver_class == nullptr && o != nullptr) {
1884 receiver_class = o->GetClass();
1885 }
1886
1887 // TODO: should we give up now if receiver_class is null?
1888 if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1889 LOG(INFO) << "ERR_INVALID_FIELDID: " << f->PrettyField() << " "
1890 << receiver_class->PrettyClass();
1891 return JDWP::ERR_INVALID_FIELDID;
1892 }
1893
1894 // Ensure the field's class is initialized.
1895 Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
1896 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
1897 LOG(WARNING) << "Not able to initialize class for GetValues: "
1898 << mirror::Class::PrettyClass(klass.Get());
1899 }
1900
1901 // The RI only enforces the static/non-static mismatch in one direction.
1902 // TODO: should we change the tests and check both?
1903 if (is_static) {
1904 if (!f->IsStatic()) {
1905 return JDWP::ERR_INVALID_FIELDID;
1906 }
1907 } else {
1908 if (f->IsStatic()) {
1909 LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
1910 << " on static field " << f->PrettyField();
1911 }
1912 }
1913 if (f->IsStatic()) {
1914 o.Assign(f->GetDeclaringClass());
1915 }
1916
1917 JValue field_value(GetArtFieldValue(f, o.Get()));
1918 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1919 Dbg::OutputJValue(tag, &field_value, pReply);
1920 return JDWP::ERR_NONE;
1921 }
1922
1923 JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1924 JDWP::ExpandBuf* pReply) {
1925 return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1926 }
1927
1928 JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
1929 JDWP::ExpandBuf* pReply) {
1930 return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1931 }
1932
1933 static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
1934 REQUIRES_SHARED(Locks::mutator_lock_) {
1935 Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
1936 // Debugging only happens at runtime, so we know we are not running in a transaction.
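// (Transactional field writes only matter while dex2oat runs class
//  initializers at compile time, so the non-transactional setters below are
//  always the right choice on this path.)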
1937 static constexpr bool kNoTransactionMode = false; 1938 switch (fieldType) { 1939 case Primitive::kPrimBoolean: 1940 CHECK_EQ(width, 1); 1941 f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value)); 1942 return JDWP::ERR_NONE; 1943 1944 case Primitive::kPrimByte: 1945 CHECK_EQ(width, 1); 1946 f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value)); 1947 return JDWP::ERR_NONE; 1948 1949 case Primitive::kPrimChar: 1950 CHECK_EQ(width, 2); 1951 f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value)); 1952 return JDWP::ERR_NONE; 1953 1954 case Primitive::kPrimShort: 1955 CHECK_EQ(width, 2); 1956 f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value)); 1957 return JDWP::ERR_NONE; 1958 1959 case Primitive::kPrimInt: 1960 case Primitive::kPrimFloat: 1961 CHECK_EQ(width, 4); 1962 // Int and Float must be treated as 32-bit values in JDWP. 1963 f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value)); 1964 return JDWP::ERR_NONE; 1965 1966 case Primitive::kPrimLong: 1967 case Primitive::kPrimDouble: 1968 CHECK_EQ(width, 8); 1969 // Long and Double must be treated as 64-bit values in JDWP. 1970 f->SetLong<kNoTransactionMode>(o, value); 1971 return JDWP::ERR_NONE; 1972 1973 case Primitive::kPrimNot: { 1974 JDWP::JdwpError error; 1975 mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error); 1976 if (error != JDWP::ERR_NONE) { 1977 return JDWP::ERR_INVALID_OBJECT; 1978 } 1979 if (v != nullptr) { 1980 ObjPtr<mirror::Class> field_type; 1981 { 1982 StackHandleScope<2> hs(Thread::Current()); 1983 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v)); 1984 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o)); 1985 field_type = f->ResolveType(); 1986 } 1987 if (!field_type->IsAssignableFrom(v->GetClass())) { 1988 return JDWP::ERR_INVALID_OBJECT; 1989 } 1990 } 1991 f->SetObject<kNoTransactionMode>(o, v); 1992 return JDWP::ERR_NONE; 1993 } 1994 1995 case Primitive::kPrimVoid: 1996 LOG(FATAL) << "Attempt to write to field of type 'void'"; 1997 UNREACHABLE(); 1998 } 1999 LOG(FATAL) << "Attempt to write to field of unknown type"; 2000 UNREACHABLE(); 2001 } 2002 2003 static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id, 2004 uint64_t value, int width, bool is_static) 2005 REQUIRES_SHARED(Locks::mutator_lock_) { 2006 JDWP::JdwpError error; 2007 Thread* self = Thread::Current(); 2008 StackHandleScope<2> hs(self); 2009 MutableHandle<mirror::Object> 2010 o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error))); 2011 if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { 2012 return JDWP::ERR_INVALID_OBJECT; 2013 } 2014 ArtField* f = FromFieldId(field_id); 2015 2016 // Ensure the field's class is initialized. 2017 Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass())); 2018 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) { 2019 LOG(WARNING) << "Not able to initialize class for SetValues: " 2020 << mirror::Class::PrettyClass(klass.Get()); 2021 } 2022 2023 // The RI only enforces the static/non-static mismatch in one direction. 2024 // TODO: should we change the tests and check both? 
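// Concretely: writing a static field through an object reference only draws a
// warning below, while writing an instance field through a class reference is
// rejected with ERR_INVALID_FIELDID.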
2025 if (is_static) { 2026 if (!f->IsStatic()) { 2027 return JDWP::ERR_INVALID_FIELDID; 2028 } 2029 } else { 2030 if (f->IsStatic()) { 2031 LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues" 2032 << " on static field " << f->PrettyField(); 2033 } 2034 } 2035 if (f->IsStatic()) { 2036 o.Assign(f->GetDeclaringClass()); 2037 } 2038 return SetArtFieldValue(f, o.Get(), value, width); 2039 } 2040 2041 JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, 2042 int width) { 2043 return SetFieldValueImpl(object_id, field_id, value, width, false); 2044 } 2045 2046 JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) { 2047 return SetFieldValueImpl(0, field_id, value, width, true); 2048 } 2049 2050 JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) { 2051 JDWP::JdwpError error; 2052 mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error); 2053 if (error != JDWP::ERR_NONE) { 2054 return error; 2055 } 2056 if (obj == nullptr) { 2057 return JDWP::ERR_INVALID_OBJECT; 2058 } 2059 { 2060 ScopedObjectAccessUnchecked soa(Thread::Current()); 2061 ObjPtr<mirror::Class> java_lang_String = 2062 soa.Decode<mirror::Class>(WellKnownClasses::java_lang_String); 2063 if (!java_lang_String->IsAssignableFrom(obj->GetClass())) { 2064 // This isn't a string. 2065 return JDWP::ERR_INVALID_STRING; 2066 } 2067 } 2068 *str = obj->AsString()->ToModifiedUtf8(); 2069 return JDWP::ERR_NONE; 2070 } 2071 2072 void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) { 2073 if (IsPrimitiveTag(tag)) { 2074 expandBufAdd1(pReply, tag); 2075 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) { 2076 expandBufAdd1(pReply, return_value->GetI()); 2077 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) { 2078 expandBufAdd2BE(pReply, return_value->GetI()); 2079 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) { 2080 expandBufAdd4BE(pReply, return_value->GetI()); 2081 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) { 2082 expandBufAdd8BE(pReply, return_value->GetJ()); 2083 } else { 2084 CHECK_EQ(tag, JDWP::JT_VOID); 2085 } 2086 } else { 2087 ScopedObjectAccessUnchecked soa(Thread::Current()); 2088 mirror::Object* value = return_value->GetL(); 2089 expandBufAdd1(pReply, TagFromObject(soa, value)); 2090 expandBufAddObjectId(pReply, gRegistry->Add(value)); 2091 } 2092 } 2093 2094 JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) { 2095 ScopedObjectAccessUnchecked soa(Thread::Current()); 2096 JDWP::JdwpError error; 2097 DecodeThread(soa, thread_id, &error); 2098 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) { 2099 return error; 2100 } 2101 2102 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName. 
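// Instead, read the name field straight out of the java.lang.Thread peer
// object, which stays reachable through the registry after the native thread
// has exited.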
2103 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error); 2104 CHECK(thread_object != nullptr) << error; 2105 ArtField* java_lang_Thread_name_field = 2106 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); 2107 ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString()); 2108 if (s != nullptr) { 2109 *name = s->ToModifiedUtf8(); 2110 } 2111 return JDWP::ERR_NONE; 2112 } 2113 2114 JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 2115 ScopedObjectAccessUnchecked soa(Thread::Current()); 2116 JDWP::JdwpError error; 2117 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error); 2118 if (error != JDWP::ERR_NONE) { 2119 return JDWP::ERR_INVALID_OBJECT; 2120 } 2121 ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroup"); 2122 // Okay, so it's an object, but is it actually a thread? 2123 DecodeThread(soa, thread_id, &error); 2124 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 2125 // Zombie threads are in the null group. 2126 expandBufAddObjectId(pReply, JDWP::ObjectId(0)); 2127 error = JDWP::ERR_NONE; 2128 } else if (error == JDWP::ERR_NONE) { 2129 ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread); 2130 CHECK(c != nullptr); 2131 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group); 2132 CHECK(f != nullptr); 2133 ObjPtr<mirror::Object> group = f->GetObject(thread_object); 2134 CHECK(group != nullptr); 2135 JDWP::ObjectId thread_group_id = gRegistry->Add(group); 2136 expandBufAddObjectId(pReply, thread_group_id); 2137 } 2138 return error; 2139 } 2140 2141 static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa, 2142 JDWP::ObjectId thread_group_id, JDWP::JdwpError* error) 2143 REQUIRES_SHARED(Locks::mutator_lock_) { 2144 mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id, 2145 error); 2146 if (*error != JDWP::ERR_NONE) { 2147 return nullptr; 2148 } 2149 if (thread_group == nullptr) { 2150 *error = JDWP::ERR_INVALID_OBJECT; 2151 return nullptr; 2152 } 2153 ObjPtr<mirror::Class> c = 2154 soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup); 2155 CHECK(c != nullptr); 2156 if (!c->IsAssignableFrom(thread_group->GetClass())) { 2157 // This is not a java.lang.ThreadGroup. 
2158 *error = JDWP::ERR_INVALID_THREAD_GROUP; 2159 return nullptr; 2160 } 2161 *error = JDWP::ERR_NONE; 2162 return thread_group; 2163 } 2164 2165 JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) { 2166 ScopedObjectAccessUnchecked soa(Thread::Current()); 2167 JDWP::JdwpError error; 2168 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error); 2169 if (error != JDWP::ERR_NONE) { 2170 return error; 2171 } 2172 ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName"); 2173 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); 2174 CHECK(f != nullptr); 2175 ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString(); 2176 2177 std::string thread_group_name(s->ToModifiedUtf8()); 2178 expandBufAddUtf8String(pReply, thread_group_name); 2179 return JDWP::ERR_NONE; 2180 } 2181 2182 JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) { 2183 ScopedObjectAccessUnchecked soa(Thread::Current()); 2184 JDWP::JdwpError error; 2185 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error); 2186 if (error != JDWP::ERR_NONE) { 2187 return error; 2188 } 2189 ObjPtr<mirror::Object> parent; 2190 { 2191 ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent"); 2192 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent); 2193 CHECK(f != nullptr); 2194 parent = f->GetObject(thread_group); 2195 } 2196 JDWP::ObjectId parent_group_id = gRegistry->Add(parent); 2197 expandBufAddObjectId(pReply, parent_group_id); 2198 return JDWP::ERR_NONE; 2199 } 2200 2201 static void GetChildThreadGroups(mirror::Object* thread_group, 2202 std::vector<JDWP::ObjectId>* child_thread_group_ids) 2203 REQUIRES_SHARED(Locks::mutator_lock_) { 2204 CHECK(thread_group != nullptr); 2205 2206 // Get the int "ngroups" count of this thread group... 2207 ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups); 2208 CHECK(ngroups_field != nullptr); 2209 const int32_t size = ngroups_field->GetInt(thread_group); 2210 if (size == 0) { 2211 return; 2212 } 2213 2214 // Get the ThreadGroup[] "groups" out of this thread group... 2215 ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups); 2216 ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group); 2217 2218 CHECK(groups_array != nullptr); 2219 CHECK(groups_array->IsObjectArray()); 2220 2221 ObjPtr<mirror::ObjectArray<mirror::Object>> groups_array_as_array = 2222 groups_array->AsObjectArray<mirror::Object>(); 2223 2224 // Copy the first 'size' elements out of the array into the result. 2225 ObjectRegistry* registry = Dbg::GetObjectRegistry(); 2226 for (int32_t i = 0; i < size; ++i) { 2227 child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i))); 2228 } 2229 } 2230 2231 JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id, 2232 JDWP::ExpandBuf* pReply) { 2233 ScopedObjectAccessUnchecked soa(Thread::Current()); 2234 JDWP::JdwpError error; 2235 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error); 2236 if (error != JDWP::ERR_NONE) { 2237 return error; 2238 } 2239 2240 // Add child threads. 
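// (JDWP's ThreadGroupReference.Children reply is two counted lists: the live
//  member threads first, then the child thread groups, in that order.)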
2241 { 2242 std::vector<JDWP::ObjectId> child_thread_ids; 2243 GetThreads(thread_group, &child_thread_ids); 2244 expandBufAdd4BE(pReply, child_thread_ids.size()); 2245 for (JDWP::ObjectId child_thread_id : child_thread_ids) { 2246 expandBufAddObjectId(pReply, child_thread_id); 2247 } 2248 } 2249 2250 // Add child thread groups. 2251 { 2252 std::vector<JDWP::ObjectId> child_thread_groups_ids; 2253 GetChildThreadGroups(thread_group, &child_thread_groups_ids); 2254 expandBufAdd4BE(pReply, child_thread_groups_ids.size()); 2255 for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) { 2256 expandBufAddObjectId(pReply, child_thread_group_id); 2257 } 2258 } 2259 2260 return JDWP::ERR_NONE; 2261 } 2262 2263 JDWP::ObjectId Dbg::GetSystemThreadGroupId() { 2264 ScopedObjectAccessUnchecked soa(Thread::Current()); 2265 ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); 2266 ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass()); 2267 return gRegistry->Add(group); 2268 } 2269 2270 JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) { 2271 switch (state) { 2272 case kBlocked: 2273 return JDWP::TS_MONITOR; 2274 case kNative: 2275 case kRunnable: 2276 case kSuspended: 2277 return JDWP::TS_RUNNING; 2278 case kSleeping: 2279 return JDWP::TS_SLEEPING; 2280 case kStarting: 2281 case kTerminated: 2282 return JDWP::TS_ZOMBIE; 2283 case kTimedWaiting: 2284 case kWaitingForTaskProcessor: 2285 case kWaitingForLockInflation: 2286 case kWaitingForCheckPointsToRun: 2287 case kWaitingForDebuggerSend: 2288 case kWaitingForDebuggerSuspension: 2289 case kWaitingForDebuggerToAttach: 2290 case kWaitingForDeoptimization: 2291 case kWaitingForGcToComplete: 2292 case kWaitingForGetObjectsAllocated: 2293 case kWaitingForJniOnLoad: 2294 case kWaitingForMethodTracingStart: 2295 case kWaitingForSignalCatcherOutput: 2296 case kWaitingForVisitObjects: 2297 case kWaitingInMainDebuggerLoop: 2298 case kWaitingInMainSignalCatcherLoop: 2299 case kWaitingPerformingGc: 2300 case kWaitingWeakGcRootRead: 2301 case kWaitingForGcThreadFlip: 2302 case kWaiting: 2303 return JDWP::TS_WAIT; 2304 // Don't add a 'default' here so the compiler can spot incompatible enum changes. 
2305 } 2306 LOG(FATAL) << "Unknown thread state: " << state; 2307 return JDWP::TS_ZOMBIE; 2308 } 2309 2310 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, 2311 JDWP::JdwpSuspendStatus* pSuspendStatus) { 2312 ScopedObjectAccess soa(Thread::Current()); 2313 2314 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED; 2315 2316 JDWP::JdwpError error; 2317 Thread* thread = DecodeThread(soa, thread_id, &error); 2318 if (error != JDWP::ERR_NONE) { 2319 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 2320 *pThreadStatus = JDWP::TS_ZOMBIE; 2321 return JDWP::ERR_NONE; 2322 } 2323 return error; 2324 } 2325 2326 if (IsSuspendedForDebugger(soa, thread)) { 2327 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED; 2328 } 2329 2330 *pThreadStatus = ToJdwpThreadStatus(thread->GetState()); 2331 return JDWP::ERR_NONE; 2332 } 2333 2334 JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 2335 ScopedObjectAccess soa(Thread::Current()); 2336 JDWP::JdwpError error; 2337 Thread* thread = DecodeThread(soa, thread_id, &error); 2338 if (error != JDWP::ERR_NONE) { 2339 return error; 2340 } 2341 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2342 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); 2343 return JDWP::ERR_NONE; 2344 } 2345 2346 JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { 2347 ScopedObjectAccess soa(Thread::Current()); 2348 JDWP::JdwpError error; 2349 Thread* thread = DecodeThread(soa, thread_id, &error); 2350 if (error != JDWP::ERR_NONE) { 2351 return error; 2352 } 2353 thread->Interrupt(soa.Self()); 2354 return JDWP::ERR_NONE; 2355 } 2356 2357 static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer) 2358 REQUIRES_SHARED(Locks::mutator_lock_) { 2359 // Do we want threads from all thread groups? 2360 if (desired_thread_group == nullptr) { 2361 return true; 2362 } 2363 ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group); 2364 DCHECK(thread_group_field != nullptr); 2365 ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer); 2366 return (group == desired_thread_group); 2367 } 2368 2369 void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) { 2370 ScopedObjectAccessUnchecked soa(Thread::Current()); 2371 std::list<Thread*> all_threads_list; 2372 { 2373 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); 2374 all_threads_list = Runtime::Current()->GetThreadList()->GetList(); 2375 } 2376 for (Thread* t : all_threads_list) { 2377 if (t == Dbg::GetDebugThread()) { 2378 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and 2379 // query all threads, so it's easier if we just don't tell them about this thread. 2380 continue; 2381 } 2382 if (t->IsStillStarting()) { 2383 // This thread is being started (and has been registered in the thread list). However, it is 2384 // not completely started yet so we must ignore it. 2385 continue; 2386 } 2387 mirror::Object* peer = t->GetPeerFromOtherThread(); 2388 if (peer == nullptr) { 2389 // peer might be null if the thread is still starting up. We can't tell the debugger about 2390 // this thread yet. 2391 // TODO: if we identified threads to the debugger by their Thread* 2392 // rather than their peer's mirror::Object*, we could fix this. 2393 // Doing so might help us report ZOMBIE threads too. 
2394 continue; 2395 } 2396 if (IsInDesiredThreadGroup(thread_group, peer)) { 2397 thread_ids->push_back(gRegistry->Add(peer)); 2398 } 2399 } 2400 } 2401 2402 static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) { 2403 struct CountStackDepthVisitor : public StackVisitor { 2404 explicit CountStackDepthVisitor(Thread* thread_in) 2405 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2406 depth(0) {} 2407 2408 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2409 // annotalysis. 2410 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2411 if (!GetMethod()->IsRuntimeMethod()) { 2412 ++depth; 2413 } 2414 return true; 2415 } 2416 size_t depth; 2417 }; 2418 2419 CountStackDepthVisitor visitor(thread); 2420 visitor.WalkStack(); 2421 return visitor.depth; 2422 } 2423 2424 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) { 2425 ScopedObjectAccess soa(Thread::Current()); 2426 JDWP::JdwpError error; 2427 *result = 0; 2428 Thread* thread = DecodeThread(soa, thread_id, &error); 2429 if (error != JDWP::ERR_NONE) { 2430 return error; 2431 } 2432 if (!IsSuspendedForDebugger(soa, thread)) { 2433 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2434 } 2435 *result = GetStackDepth(thread); 2436 return JDWP::ERR_NONE; 2437 } 2438 2439 JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, 2440 size_t frame_count, JDWP::ExpandBuf* buf) { 2441 class GetFrameVisitor : public StackVisitor { 2442 public: 2443 GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in, 2444 JDWP::ExpandBuf* buf_in) 2445 REQUIRES_SHARED(Locks::mutator_lock_) 2446 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2447 depth_(0), 2448 start_frame_(start_frame_in), 2449 frame_count_(frame_count_in), 2450 buf_(buf_in) { 2451 expandBufAdd4BE(buf_, frame_count_); 2452 } 2453 2454 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 2455 if (GetMethod()->IsRuntimeMethod()) { 2456 return true; // The debugger can't do anything useful with a frame that has no Method*. 
2457 } 2458 if (depth_ >= start_frame_ + frame_count_) { 2459 return false; 2460 } 2461 if (depth_ >= start_frame_) { 2462 JDWP::FrameId frame_id(GetFrameId()); 2463 JDWP::JdwpLocation location; 2464 SetJdwpLocation(&location, GetMethod(), GetDexPc()); 2465 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location; 2466 expandBufAdd8BE(buf_, frame_id); 2467 expandBufAddLocation(buf_, location); 2468 } 2469 ++depth_; 2470 return true; 2471 } 2472 2473 private: 2474 size_t depth_; 2475 const size_t start_frame_; 2476 const size_t frame_count_; 2477 JDWP::ExpandBuf* buf_; 2478 }; 2479 2480 ScopedObjectAccessUnchecked soa(Thread::Current()); 2481 JDWP::JdwpError error; 2482 Thread* thread = DecodeThread(soa, thread_id, &error); 2483 if (error != JDWP::ERR_NONE) { 2484 return error; 2485 } 2486 if (!IsSuspendedForDebugger(soa, thread)) { 2487 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2488 } 2489 GetFrameVisitor visitor(thread, start_frame, frame_count, buf); 2490 visitor.WalkStack(); 2491 return JDWP::ERR_NONE; 2492 } 2493 2494 JDWP::ObjectId Dbg::GetThreadSelfId() { 2495 return GetThreadId(Thread::Current()); 2496 } 2497 2498 JDWP::ObjectId Dbg::GetThreadId(Thread* thread) { 2499 ScopedObjectAccessUnchecked soa(Thread::Current()); 2500 return gRegistry->Add(thread->GetPeerFromOtherThread()); 2501 } 2502 2503 void Dbg::SuspendVM() { 2504 // Avoid a deadlock between GC and debugger where GC gets suspended during GC. b/25800335. 2505 gc::ScopedGCCriticalSection gcs(Thread::Current(), 2506 gc::kGcCauseDebugger, 2507 gc::kCollectorTypeDebugger); 2508 Runtime::Current()->GetThreadList()->SuspendAllForDebugger(); 2509 } 2510 2511 void Dbg::ResumeVM() { 2512 Runtime::Current()->GetThreadList()->ResumeAllForDebugger(); 2513 } 2514 2515 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) { 2516 Thread* self = Thread::Current(); 2517 ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr); 2518 { 2519 ScopedObjectAccess soa(self); 2520 JDWP::JdwpError error; 2521 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error))); 2522 } 2523 if (peer.get() == nullptr) { 2524 return JDWP::ERR_THREAD_NOT_ALIVE; 2525 } 2526 // Suspend thread to build stack trace. 
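// SuspendThreadByPeer returns the suspended Thread* on success; a null result
// with |timed_out| set means the suspension attempt timed out (surfaced below
// as ERR_INTERNAL), while null without it means the thread is already gone.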
2527 bool timed_out; 2528 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 2529 Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), 2530 request_suspension, 2531 SuspendReason::kForDebugger, 2532 &timed_out); 2533 if (thread != nullptr) { 2534 return JDWP::ERR_NONE; 2535 } else if (timed_out) { 2536 return JDWP::ERR_INTERNAL; 2537 } else { 2538 return JDWP::ERR_THREAD_NOT_ALIVE; 2539 } 2540 } 2541 2542 void Dbg::ResumeThread(JDWP::ObjectId thread_id) { 2543 ScopedObjectAccessUnchecked soa(Thread::Current()); 2544 JDWP::JdwpError error; 2545 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error); 2546 CHECK(peer != nullptr) << error; 2547 Thread* thread; 2548 { 2549 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2550 thread = Thread::FromManagedThread(soa, peer); 2551 } 2552 if (thread == nullptr) { 2553 LOG(WARNING) << "No such thread for resume: " << peer; 2554 return; 2555 } 2556 bool needs_resume; 2557 { 2558 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2559 needs_resume = thread->GetDebugSuspendCount() > 0; 2560 } 2561 if (needs_resume) { 2562 bool resumed = Runtime::Current()->GetThreadList()->Resume(thread, SuspendReason::kForDebugger); 2563 DCHECK(resumed); 2564 } 2565 } 2566 2567 void Dbg::SuspendSelf() { 2568 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger(); 2569 } 2570 2571 struct GetThisVisitor : public StackVisitor { 2572 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in) 2573 REQUIRES_SHARED(Locks::mutator_lock_) 2574 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2575 this_object(nullptr), 2576 frame_id(frame_id_in) {} 2577 2578 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2579 // annotalysis. 2580 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2581 if (frame_id != GetFrameId()) { 2582 return true; // continue 2583 } else { 2584 this_object = GetThisObject(); 2585 return false; 2586 } 2587 } 2588 2589 mirror::Object* this_object; 2590 JDWP::FrameId frame_id; 2591 }; 2592 2593 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, 2594 JDWP::ObjectId* result) { 2595 ScopedObjectAccessUnchecked soa(Thread::Current()); 2596 JDWP::JdwpError error; 2597 Thread* thread = DecodeThread(soa, thread_id, &error); 2598 if (error != JDWP::ERR_NONE) { 2599 return error; 2600 } 2601 if (!IsSuspendedForDebugger(soa, thread)) { 2602 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2603 } 2604 std::unique_ptr<Context> context(Context::Create()); 2605 GetThisVisitor visitor(thread, context.get(), frame_id); 2606 visitor.WalkStack(); 2607 *result = gRegistry->Add(visitor.this_object); 2608 return JDWP::ERR_NONE; 2609 } 2610 2611 // Walks the stack until we find the frame with the given FrameId. 2612 class FindFrameVisitor FINAL : public StackVisitor { 2613 public: 2614 FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) 2615 REQUIRES_SHARED(Locks::mutator_lock_) 2616 : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2617 frame_id_(frame_id), 2618 error_(JDWP::ERR_INVALID_FRAMEID) {} 2619 2620 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2621 // annotalysis. 2622 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2623 if (GetFrameId() != frame_id_) { 2624 return true; // Not our frame, carry on. 
2625 } 2626 ArtMethod* m = GetMethod(); 2627 if (m->IsNative()) { 2628 // We can't read/write local value from/into native method. 2629 error_ = JDWP::ERR_OPAQUE_FRAME; 2630 } else { 2631 // We found our frame. 2632 error_ = JDWP::ERR_NONE; 2633 } 2634 return false; 2635 } 2636 2637 JDWP::JdwpError GetError() const { 2638 return error_; 2639 } 2640 2641 private: 2642 const JDWP::FrameId frame_id_; 2643 JDWP::JdwpError error_; 2644 2645 DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor); 2646 }; 2647 2648 JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) { 2649 JDWP::ObjectId thread_id = request->ReadThreadId(); 2650 JDWP::FrameId frame_id = request->ReadFrameId(); 2651 2652 ScopedObjectAccessUnchecked soa(Thread::Current()); 2653 JDWP::JdwpError error; 2654 Thread* thread = DecodeThread(soa, thread_id, &error); 2655 if (error != JDWP::ERR_NONE) { 2656 return error; 2657 } 2658 if (!IsSuspendedForDebugger(soa, thread)) { 2659 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2660 } 2661 // Find the frame with the given frame_id. 2662 std::unique_ptr<Context> context(Context::Create()); 2663 FindFrameVisitor visitor(thread, context.get(), frame_id); 2664 visitor.WalkStack(); 2665 if (visitor.GetError() != JDWP::ERR_NONE) { 2666 return visitor.GetError(); 2667 } 2668 2669 // Read the values from visitor's context. 2670 int32_t slot_count = request->ReadSigned32("slot count"); 2671 expandBufAdd4BE(pReply, slot_count); /* "int values" */ 2672 for (int32_t i = 0; i < slot_count; ++i) { 2673 uint32_t slot = request->ReadUnsigned32("slot"); 2674 JDWP::JdwpTag reqSigByte = request->ReadTag(); 2675 2676 VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte; 2677 2678 size_t width = Dbg::GetTagWidth(reqSigByte); 2679 uint8_t* ptr = expandBufAddSpace(pReply, width + 1); 2680 error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width); 2681 if (error != JDWP::ERR_NONE) { 2682 return error; 2683 } 2684 } 2685 return JDWP::ERR_NONE; 2686 } 2687 2688 constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION; 2689 2690 static std::string GetStackContextAsString(const StackVisitor& visitor) 2691 REQUIRES_SHARED(Locks::mutator_lock_) { 2692 return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false), 2693 ArtMethod::PrettyMethod(visitor.GetMethod()).c_str()); 2694 } 2695 2696 static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg, 2697 JDWP::JdwpTag tag) 2698 REQUIRES_SHARED(Locks::mutator_lock_) { 2699 LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg 2700 << GetStackContextAsString(visitor); 2701 return kStackFrameLocalAccessError; 2702 } 2703 2704 JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa, 2705 int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) { 2706 ArtMethod* m = visitor.GetMethod(); 2707 JDWP::JdwpError error = JDWP::ERR_NONE; 2708 uint16_t vreg = DemangleSlot(slot, m, &error); 2709 if (error != JDWP::ERR_NONE) { 2710 return error; 2711 } 2712 // TODO: check that the tag is compatible with the actual type of the slot! 
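// Each value in the reply is encoded as [tag byte][payload of
// GetTagWidth(tag) bytes]; |buf| points at the tag slot, so the cases below
// write the payload at buf + 1 and the (possibly refined) tag is prepended
// once it is known.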
2713 switch (tag) { 2714 case JDWP::JT_BOOLEAN: { 2715 CHECK_EQ(width, 1U); 2716 uint32_t intVal; 2717 if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) { 2718 return FailGetLocalValue(visitor, vreg, tag); 2719 } 2720 VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal; 2721 JDWP::Set1(buf + 1, intVal != 0); 2722 break; 2723 } 2724 case JDWP::JT_BYTE: { 2725 CHECK_EQ(width, 1U); 2726 uint32_t intVal; 2727 if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) { 2728 return FailGetLocalValue(visitor, vreg, tag); 2729 } 2730 VLOG(jdwp) << "get byte local " << vreg << " = " << intVal; 2731 JDWP::Set1(buf + 1, intVal); 2732 break; 2733 } 2734 case JDWP::JT_SHORT: 2735 case JDWP::JT_CHAR: { 2736 CHECK_EQ(width, 2U); 2737 uint32_t intVal; 2738 if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) { 2739 return FailGetLocalValue(visitor, vreg, tag); 2740 } 2741 VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal; 2742 JDWP::Set2BE(buf + 1, intVal); 2743 break; 2744 } 2745 case JDWP::JT_INT: { 2746 CHECK_EQ(width, 4U); 2747 uint32_t intVal; 2748 if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) { 2749 return FailGetLocalValue(visitor, vreg, tag); 2750 } 2751 VLOG(jdwp) << "get int local " << vreg << " = " << intVal; 2752 JDWP::Set4BE(buf + 1, intVal); 2753 break; 2754 } 2755 case JDWP::JT_FLOAT: { 2756 CHECK_EQ(width, 4U); 2757 uint32_t intVal; 2758 if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) { 2759 return FailGetLocalValue(visitor, vreg, tag); 2760 } 2761 VLOG(jdwp) << "get float local " << vreg << " = " << intVal; 2762 JDWP::Set4BE(buf + 1, intVal); 2763 break; 2764 } 2765 case JDWP::JT_ARRAY: 2766 case JDWP::JT_CLASS_LOADER: 2767 case JDWP::JT_CLASS_OBJECT: 2768 case JDWP::JT_OBJECT: 2769 case JDWP::JT_STRING: 2770 case JDWP::JT_THREAD: 2771 case JDWP::JT_THREAD_GROUP: { 2772 CHECK_EQ(width, sizeof(JDWP::ObjectId)); 2773 uint32_t intVal; 2774 if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) { 2775 return FailGetLocalValue(visitor, vreg, tag); 2776 } 2777 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal); 2778 VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o; 2779 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { 2780 LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u", 2781 reinterpret_cast<uintptr_t>(o), vreg) 2782 << GetStackContextAsString(visitor); 2783 UNREACHABLE(); 2784 } 2785 tag = TagFromObject(soa, o); 2786 JDWP::SetObjectId(buf + 1, gRegistry->Add(o)); 2787 break; 2788 } 2789 case JDWP::JT_DOUBLE: { 2790 CHECK_EQ(width, 8U); 2791 uint64_t longVal; 2792 if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) { 2793 return FailGetLocalValue(visitor, vreg, tag); 2794 } 2795 VLOG(jdwp) << "get double local " << vreg << " = " << longVal; 2796 JDWP::Set8BE(buf + 1, longVal); 2797 break; 2798 } 2799 case JDWP::JT_LONG: { 2800 CHECK_EQ(width, 8U); 2801 uint64_t longVal; 2802 if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) { 2803 return FailGetLocalValue(visitor, vreg, tag); 2804 } 2805 VLOG(jdwp) << "get long local " << vreg << " = " << longVal; 2806 JDWP::Set8BE(buf + 1, longVal); 2807 break; 2808 } 2809 default: 2810 LOG(FATAL) << "Unknown tag " << tag; 2811 UNREACHABLE(); 2812 } 2813 2814 // Prepend tag, which may have been updated. 
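// (For reference slots, the generic JT_OBJECT tag requested by the debugger
//  has been replaced by the precise tag computed from the object's class,
//  e.g. JT_STRING or JT_THREAD.)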
2815 JDWP::Set1(buf, tag); 2816 return JDWP::ERR_NONE; 2817 } 2818 2819 JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) { 2820 JDWP::ObjectId thread_id = request->ReadThreadId(); 2821 JDWP::FrameId frame_id = request->ReadFrameId(); 2822 2823 ScopedObjectAccessUnchecked soa(Thread::Current()); 2824 JDWP::JdwpError error; 2825 Thread* thread = DecodeThread(soa, thread_id, &error); 2826 if (error != JDWP::ERR_NONE) { 2827 return error; 2828 } 2829 if (!IsSuspendedForDebugger(soa, thread)) { 2830 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2831 } 2832 // Find the frame with the given frame_id. 2833 std::unique_ptr<Context> context(Context::Create()); 2834 FindFrameVisitor visitor(thread, context.get(), frame_id); 2835 visitor.WalkStack(); 2836 if (visitor.GetError() != JDWP::ERR_NONE) { 2837 return visitor.GetError(); 2838 } 2839 2840 // Writes the values into visitor's context. 2841 int32_t slot_count = request->ReadSigned32("slot count"); 2842 for (int32_t i = 0; i < slot_count; ++i) { 2843 uint32_t slot = request->ReadUnsigned32("slot"); 2844 JDWP::JdwpTag sigByte = request->ReadTag(); 2845 size_t width = Dbg::GetTagWidth(sigByte); 2846 uint64_t value = request->ReadValue(width); 2847 2848 VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value; 2849 error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width); 2850 if (error != JDWP::ERR_NONE) { 2851 return error; 2852 } 2853 } 2854 return JDWP::ERR_NONE; 2855 } 2856 2857 template<typename T> 2858 static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg, 2859 JDWP::JdwpTag tag, T value) 2860 REQUIRES_SHARED(Locks::mutator_lock_) { 2861 LOG(ERROR) << "Failed to write " << tag << " local " << value 2862 << " (0x" << std::hex << value << ") into register v" << vreg 2863 << GetStackContextAsString(visitor); 2864 return kStackFrameLocalAccessError; 2865 } 2866 2867 JDWP::JdwpError Dbg::SetLocalValue(Thread* thread, StackVisitor& visitor, int slot, 2868 JDWP::JdwpTag tag, uint64_t value, size_t width) { 2869 ArtMethod* m = visitor.GetMethod(); 2870 JDWP::JdwpError error = JDWP::ERR_NONE; 2871 uint16_t vreg = DemangleSlot(slot, m, &error); 2872 if (error != JDWP::ERR_NONE) { 2873 return error; 2874 } 2875 // TODO: check that the tag is compatible with the actual type of the slot! 
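// The caller has already decoded the request's [tag][payload] pair into
// |tag|, |value| and |width|; the cases below narrow that payload into the
// frame's vreg (or vreg pair).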
2876 switch (tag) {
2877 case JDWP::JT_BOOLEAN:
2878 case JDWP::JT_BYTE:
2879 CHECK_EQ(width, 1U);
2880 if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2881 return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2882 }
2883 break;
2884 case JDWP::JT_SHORT:
2885 case JDWP::JT_CHAR:
2886 CHECK_EQ(width, 2U);
2887 if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2888 return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2889 }
2890 break;
2891 case JDWP::JT_INT:
2892 CHECK_EQ(width, 4U);
2893 if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2894 return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2895 }
2896 break;
2897 case JDWP::JT_FLOAT:
2898 CHECK_EQ(width, 4U);
2899 if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
2900 return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2901 }
2902 break;
2903 case JDWP::JT_ARRAY:
2904 case JDWP::JT_CLASS_LOADER:
2905 case JDWP::JT_CLASS_OBJECT:
2906 case JDWP::JT_OBJECT:
2907 case JDWP::JT_STRING:
2908 case JDWP::JT_THREAD:
2909 case JDWP::JT_THREAD_GROUP: {
2910 CHECK_EQ(width, sizeof(JDWP::ObjectId));
2911 mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
2912 &error);
2913 if (error != JDWP::ERR_NONE) {
2914 VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2915 return JDWP::ERR_INVALID_OBJECT;
2916 }
2917 if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2918 kReferenceVReg)) {
2919 return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
2920 }
2921 break;
2922 }
2923 case JDWP::JT_DOUBLE: {
2924 CHECK_EQ(width, 8U);
2925 if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2926 return FailSetLocalValue(visitor, vreg, tag, value);
2927 }
2928 break;
2929 }
2930 case JDWP::JT_LONG: {
2931 CHECK_EQ(width, 8U);
2932 if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
2933 return FailSetLocalValue(visitor, vreg, tag, value);
2934 }
2935 break;
2936 }
2937 default:
2938 LOG(FATAL) << "Unknown tag " << tag;
2939 UNREACHABLE();
2940 }
2941
2942 // If we set the local variable in a compiled frame, we need to trigger a deoptimization of
2943 // the stack so we continue execution with the interpreter using the new value(s) of the updated
2944 // local variable(s). To achieve this, we install an instrumentation exit stub on each frame of
2945 // the thread's stack. The stubs will cause the deoptimization to happen when the frames unwind.
2946 if (!visitor.IsShadowFrame() && thread->HasDebuggerShadowFrames()) {
2947 Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(thread);
2948 }
2949
2950 return JDWP::ERR_NONE;
2951 }
2952
2953 static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
2954 REQUIRES_SHARED(Locks::mutator_lock_) {
2955 DCHECK(location != nullptr);
2956 if (m == nullptr) {
2957 memset(location, 0, sizeof(*location));
2958 } else {
2959 location->method = m->GetCanonicalMethod(kRuntimePointerSize);
2960 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ?
static_cast<uint32_t>(-1) : dex_pc;
2961 }
2962 }
2963
2964 void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
2965 int event_flags, const JValue* return_value) {
2966 if (!IsDebuggerActive()) {
2967 return;
2968 }
2969 DCHECK(m != nullptr);
2970 DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2971 JDWP::EventLocation location;
2972 SetEventLocation(&location, m, dex_pc);
2973
2974 // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
2975 // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
2976 // we temporarily clear the current thread's exception (if any) and restore it after
2977 // the call.
2978 // Note: the only way to get a pending exception here is to suspend on a move-exception
2979 // instruction.
2980 Thread* const self = Thread::Current();
2981 StackHandleScope<1> hs(self);
2982 Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
2983 self->ClearException();
2984 if (kIsDebugBuild && pending_exception != nullptr) {
2985 const Instruction& instr = location.method->DexInstructions().InstructionAt(location.dex_pc);
2986 CHECK_EQ(Instruction::MOVE_EXCEPTION, instr.Opcode());
2987 }
2988
2989 gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2990
2991 if (pending_exception != nullptr) {
2992 self->SetException(pending_exception.Get());
2993 }
2994 }
2995
2996 void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
2997 mirror::Object* this_object, ArtField* f) {
2998 DCHECK(m != nullptr);
2999 DCHECK(f != nullptr);
3000 // TODO: we should also send field access events for native methods.
3001 if (!IsDebuggerActive() || m->IsNative()) {
3002 return;
3003 }
3004 JDWP::EventLocation location;
3005 SetEventLocation(&location, m, dex_pc);
3006
3007 gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
3008 }
3009
3010 void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
3011 mirror::Object* this_object, ArtField* f,
3012 const JValue* field_value) {
3013 DCHECK(m != nullptr);
3014 DCHECK(f != nullptr);
3015 DCHECK(field_value != nullptr);
3016 // TODO: we should also send field modification events for native methods.
3017 if (!IsDebuggerActive() || m->IsNative()) {
3018 return;
3019 }
3020 JDWP::EventLocation location;
3021 SetEventLocation(&location, m, dex_pc);
3022
3023 gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
3024 }
3025
3026 /**
3027 * Finds the location where this exception will be caught. We search until we reach the top
3028 * frame, in which case this exception is considered uncaught.
3029 */
3030 class CatchLocationFinder : public StackVisitor {
3031 public:
3032 CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
3033 REQUIRES_SHARED(Locks::mutator_lock_)
3034 : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3035 exception_(exception),
3036 handle_scope_(self),
3037 this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
3038 catch_method_(nullptr),
3039 throw_method_(nullptr),
3040 catch_dex_pc_(dex::kDexNoIndex),
3041 throw_dex_pc_(dex::kDexNoIndex) {
3042 }
3043
3044 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
3045 ArtMethod* method = GetMethod();
3046 DCHECK(method != nullptr);
3047 if (method->IsRuntimeMethod()) {
3048 // Ignore callee save method.
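// (A runtime method has no dex code, so such a frame can neither be the
//  throw site nor hold a catch handler.)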
3049 DCHECK(method->IsCalleeSaveMethod()); 3050 return true; 3051 } 3052 3053 uint32_t dex_pc = GetDexPc(); 3054 if (throw_method_ == nullptr) { 3055 // First Java method found. It is either the method that threw the exception, 3056 // or the Java native method that is reporting an exception thrown by 3057 // native code. 3058 this_at_throw_.Assign(GetThisObject()); 3059 throw_method_ = method; 3060 throw_dex_pc_ = dex_pc; 3061 } 3062 3063 if (dex_pc != dex::kDexNoIndex) { 3064 StackHandleScope<1> hs(GetThread()); 3065 uint32_t found_dex_pc; 3066 Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass())); 3067 bool unused_clear_exception; 3068 found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception); 3069 if (found_dex_pc != dex::kDexNoIndex) { 3070 catch_method_ = method; 3071 catch_dex_pc_ = found_dex_pc; 3072 return false; // End stack walk. 3073 } 3074 } 3075 return true; // Continue stack walk. 3076 } 3077 3078 ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) { 3079 return catch_method_; 3080 } 3081 3082 ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) { 3083 return throw_method_; 3084 } 3085 3086 mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) { 3087 return this_at_throw_.Get(); 3088 } 3089 3090 uint32_t GetCatchDexPc() const { 3091 return catch_dex_pc_; 3092 } 3093 3094 uint32_t GetThrowDexPc() const { 3095 return throw_dex_pc_; 3096 } 3097 3098 private: 3099 const Handle<mirror::Throwable>& exception_; 3100 StackHandleScope<1> handle_scope_; 3101 MutableHandle<mirror::Object> this_at_throw_; 3102 ArtMethod* catch_method_; 3103 ArtMethod* throw_method_; 3104 uint32_t catch_dex_pc_; 3105 uint32_t throw_dex_pc_; 3106 3107 DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder); 3108 }; 3109 3110 void Dbg::PostException(mirror::Throwable* exception_object) { 3111 if (!IsDebuggerActive()) { 3112 return; 3113 } 3114 Thread* const self = Thread::Current(); 3115 StackHandleScope<1> handle_scope(self); 3116 Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object)); 3117 std::unique_ptr<Context> context(Context::Create()); 3118 CatchLocationFinder clf(self, h_exception, context.get()); 3119 clf.WalkStack(/* include_transitions */ false); 3120 JDWP::EventLocation exception_throw_location; 3121 SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc()); 3122 JDWP::EventLocation exception_catch_location; 3123 SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc()); 3124 3125 gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location, 3126 clf.GetThisAtThrow()); 3127 } 3128 3129 void Dbg::PostClassPrepare(mirror::Class* c) { 3130 if (!IsDebuggerActive()) { 3131 return; 3132 } 3133 gJdwpState->PostClassPrepare(c); 3134 } 3135 3136 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object, 3137 ArtMethod* m, uint32_t dex_pc, 3138 int event_flags, const JValue* return_value) { 3139 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) { 3140 return; 3141 } 3142 3143 if (IsBreakpoint(m, dex_pc)) { 3144 event_flags |= kBreakpoint; 3145 } 3146 3147 // If the debugger is single-stepping one of our threads, check to 3148 // see if we're that thread and we've reached a step point. 
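// JDWP defines three step depths, all handled below: SD_INTO stops in any new
// method, SD_OVER stops at a new location at the same or a shallower frame
// depth, and SD_OUT stops only once the current frame has popped.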
3149 const SingleStepControl* single_step_control = thread->GetSingleStepControl(); 3150 if (single_step_control != nullptr) { 3151 CHECK(!m->IsNative()); 3152 if (single_step_control->GetStepDepth() == JDWP::SD_INTO) { 3153 // Step into method calls. We break when the line number 3154 // or method pointer changes. If we're in SS_MIN mode, we 3155 // always stop. 3156 if (single_step_control->GetMethod() != m) { 3157 event_flags |= kSingleStep; 3158 VLOG(jdwp) << "SS new method"; 3159 } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) { 3160 event_flags |= kSingleStep; 3161 VLOG(jdwp) << "SS new instruction"; 3162 } else if (single_step_control->ContainsDexPc(dex_pc)) { 3163 event_flags |= kSingleStep; 3164 VLOG(jdwp) << "SS new line"; 3165 } 3166 } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) { 3167 // Step over method calls. We break when the line number is 3168 // different and the frame depth is <= the original frame 3169 // depth. (We can't just compare on the method, because we 3170 // might get unrolled past it by an exception, and it's tricky 3171 // to identify recursion.) 3172 3173 int stack_depth = GetStackDepth(thread); 3174 3175 if (stack_depth < single_step_control->GetStackDepth()) { 3176 // Popped up one or more frames, always trigger. 3177 event_flags |= kSingleStep; 3178 VLOG(jdwp) << "SS method pop"; 3179 } else if (stack_depth == single_step_control->GetStackDepth()) { 3180 // Same depth, see if we moved. 3181 if (single_step_control->GetStepSize() == JDWP::SS_MIN) { 3182 event_flags |= kSingleStep; 3183 VLOG(jdwp) << "SS new instruction"; 3184 } else if (single_step_control->ContainsDexPc(dex_pc)) { 3185 event_flags |= kSingleStep; 3186 VLOG(jdwp) << "SS new line"; 3187 } 3188 } 3189 } else { 3190 CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT); 3191 // Return from the current method. We break when the frame 3192 // depth pops up. 3193 3194 // This differs from the "method exit" break in that it stops 3195 // with the PC at the next instruction in the returned-to 3196 // function, rather than the end of the returning function. 3197 3198 int stack_depth = GetStackDepth(thread); 3199 if (stack_depth < single_step_control->GetStackDepth()) { 3200 event_flags |= kSingleStep; 3201 VLOG(jdwp) << "SS method pop"; 3202 } 3203 } 3204 } 3205 3206 // If there's something interesting going on, see if it matches one 3207 // of the debugger filters. 3208 if (event_flags != 0) { 3209 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value); 3210 } 3211 } 3212 3213 size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) { 3214 switch (instrumentation_event) { 3215 case instrumentation::Instrumentation::kMethodEntered: 3216 return &method_enter_event_ref_count_; 3217 case instrumentation::Instrumentation::kMethodExited: 3218 return &method_exit_event_ref_count_; 3219 case instrumentation::Instrumentation::kDexPcMoved: 3220 return &dex_pc_change_event_ref_count_; 3221 case instrumentation::Instrumentation::kFieldRead: 3222 return &field_read_event_ref_count_; 3223 case instrumentation::Instrumentation::kFieldWritten: 3224 return &field_write_event_ref_count_; 3225 case instrumentation::Instrumentation::kExceptionThrown: 3226 return &exception_catch_event_ref_count_; 3227 default: 3228 return nullptr; 3229 } 3230 } 3231 3232 // Process request while all mutator threads are suspended. 
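// ManageDeoptimization() below establishes that context: it suspends all threads via
// ScopedSuspendAll before draining the pending request queue.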
3233 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) { 3234 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 3235 switch (request.GetKind()) { 3236 case DeoptimizationRequest::kNothing: 3237 LOG(WARNING) << "Ignoring empty deoptimization request."; 3238 break; 3239 case DeoptimizationRequest::kRegisterForEvent: 3240 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x", 3241 request.InstrumentationEvent()); 3242 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent()); 3243 instrumentation_events_ |= request.InstrumentationEvent(); 3244 break; 3245 case DeoptimizationRequest::kUnregisterForEvent: 3246 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x", 3247 request.InstrumentationEvent()); 3248 instrumentation->RemoveListener(&gDebugInstrumentationListener, 3249 request.InstrumentationEvent()); 3250 instrumentation_events_ &= ~request.InstrumentationEvent(); 3251 break; 3252 case DeoptimizationRequest::kFullDeoptimization: 3253 VLOG(jdwp) << "Deoptimize the world ..."; 3254 instrumentation->DeoptimizeEverything(kDbgInstrumentationKey); 3255 VLOG(jdwp) << "Deoptimize the world DONE"; 3256 break; 3257 case DeoptimizationRequest::kFullUndeoptimization: 3258 VLOG(jdwp) << "Undeoptimize the world ..."; 3259 instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey); 3260 VLOG(jdwp) << "Undeoptimize the world DONE"; 3261 break; 3262 case DeoptimizationRequest::kSelectiveDeoptimization: 3263 VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ..."; 3264 instrumentation->Deoptimize(request.Method()); 3265 VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE"; 3266 break; 3267 case DeoptimizationRequest::kSelectiveUndeoptimization: 3268 VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ..."; 3269 instrumentation->Undeoptimize(request.Method()); 3270 VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE"; 3271 break; 3272 default: 3273 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind(); 3274 break; 3275 } 3276 } 3277 3278 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) { 3279 if (req.GetKind() == DeoptimizationRequest::kNothing) { 3280 // Nothing to do. 
3281 return; 3282 } 3283 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_); 3284 RequestDeoptimizationLocked(req); 3285 } 3286 3287 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) { 3288 switch (req.GetKind()) { 3289 case DeoptimizationRequest::kRegisterForEvent: { 3290 DCHECK_NE(req.InstrumentationEvent(), 0u); 3291 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 3292 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 3293 req.InstrumentationEvent()); 3294 if (*counter == 0) { 3295 VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x", 3296 deoptimization_requests_.size(), req.InstrumentationEvent()); 3297 deoptimization_requests_.push_back(req); 3298 } 3299 *counter = *counter + 1; 3300 break; 3301 } 3302 case DeoptimizationRequest::kUnregisterForEvent: { 3303 DCHECK_NE(req.InstrumentationEvent(), 0u); 3304 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 3305 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 3306 req.InstrumentationEvent()); 3307 *counter = *counter - 1; 3308 if (*counter == 0) { 3309 VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x", 3310 deoptimization_requests_.size(), req.InstrumentationEvent()); 3311 deoptimization_requests_.push_back(req); 3312 } 3313 break; 3314 } 3315 case DeoptimizationRequest::kFullDeoptimization: { 3316 DCHECK(req.Method() == nullptr); 3317 if (full_deoptimization_event_count_ == 0) { 3318 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 3319 << " for full deoptimization"; 3320 deoptimization_requests_.push_back(req); 3321 } 3322 ++full_deoptimization_event_count_; 3323 break; 3324 } 3325 case DeoptimizationRequest::kFullUndeoptimization: { 3326 DCHECK(req.Method() == nullptr); 3327 DCHECK_GT(full_deoptimization_event_count_, 0U); 3328 --full_deoptimization_event_count_; 3329 if (full_deoptimization_event_count_ == 0) { 3330 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 3331 << " for full undeoptimization"; 3332 deoptimization_requests_.push_back(req); 3333 } 3334 break; 3335 } 3336 case DeoptimizationRequest::kSelectiveDeoptimization: { 3337 DCHECK(req.Method() != nullptr); 3338 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 3339 << " for deoptimization of " << req.Method()->PrettyMethod(); 3340 deoptimization_requests_.push_back(req); 3341 break; 3342 } 3343 case DeoptimizationRequest::kSelectiveUndeoptimization: { 3344 DCHECK(req.Method() != nullptr); 3345 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 3346 << " for undeoptimization of " << req.Method()->PrettyMethod(); 3347 deoptimization_requests_.push_back(req); 3348 break; 3349 } 3350 default: { 3351 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind(); 3352 break; 3353 } 3354 } 3355 } 3356 3357 void Dbg::ManageDeoptimization() { 3358 Thread* const self = Thread::Current(); 3359 { 3360 // Avoid suspend/resume if there is no pending request. 3361 MutexLock mu(self, *Locks::deoptimization_lock_); 3362 if (deoptimization_requests_.empty()) { 3363 return; 3364 } 3365 } 3366 CHECK_EQ(self->GetState(), kRunnable); 3367 ScopedThreadSuspension sts(self, kWaitingForDeoptimization); 3368 // Required for ProcessDeoptimizationRequest. 
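  // Instrumentation updates must not race with the garbage collector, so enter a GC critical
  // section before suspending all threads.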
3369 gc::ScopedGCCriticalSection gcs(self, 3370 gc::kGcCauseInstrumentation, 3371 gc::kCollectorTypeInstrumentation); 3372 // We need to suspend mutator threads first. 3373 ScopedSuspendAll ssa(__FUNCTION__); 3374 const ThreadState old_state = self->SetStateUnsafe(kRunnable); 3375 { 3376 MutexLock mu(self, *Locks::deoptimization_lock_); 3377 size_t req_index = 0; 3378 for (DeoptimizationRequest& request : deoptimization_requests_) { 3379 VLOG(jdwp) << "Process deoptimization request #" << req_index++; 3380 ProcessDeoptimizationRequest(request); 3381 } 3382 deoptimization_requests_.clear(); 3383 } 3384 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 3385 } 3386 3387 static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m) 3388 REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) { 3389 for (Breakpoint& breakpoint : gBreakpoints) { 3390 if (breakpoint.IsInMethod(m)) { 3391 return &breakpoint; 3392 } 3393 } 3394 return nullptr; 3395 } 3396 3397 bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) { 3398 ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 3399 return FindFirstBreakpointForMethod(method) != nullptr; 3400 } 3401 3402 // Sanity checks all existing breakpoints on the same method. 3403 static void SanityCheckExistingBreakpoints(ArtMethod* m, 3404 DeoptimizationRequest::Kind deoptimization_kind) 3405 REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) { 3406 for (const Breakpoint& breakpoint : gBreakpoints) { 3407 if (breakpoint.IsInMethod(m)) { 3408 CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind()); 3409 } 3410 } 3411 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 3412 if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) { 3413 // We should have deoptimized everything but not "selectively" deoptimized this method. 3414 CHECK(instrumentation->AreAllMethodsDeoptimized()); 3415 CHECK(!instrumentation->IsDeoptimized(m)); 3416 } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) { 3417 // We should have "selectively" deoptimized this method. 3418 // Note: while we have not deoptimized everything for this method, we may have done it for 3419 // another event. 3420 CHECK(instrumentation->IsDeoptimized(m)); 3421 } else { 3422 // This method does not require deoptimization. 3423 CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing); 3424 CHECK(!instrumentation->IsDeoptimized(m)); 3425 } 3426 } 3427 3428 // Returns the deoptimization kind required to set a breakpoint in a method. 3429 // If a breakpoint has already been set, we also return the first breakpoint 3430 // through the given 'existing_brkpt' pointer. 3431 static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self, 3432 ArtMethod* m, 3433 const Breakpoint** existing_brkpt) 3434 REQUIRES_SHARED(Locks::mutator_lock_) { 3435 if (!Dbg::RequiresDeoptimization()) { 3436 // We already run in interpreter-only mode so we don't need to deoptimize anything. 
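    // This is the case, for example, when the runtime was started with -Xint: every method
    // already executes under the interpreter, so breakpoints work without any deoptimization.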
    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
               << ArtMethod::PrettyMethod(m);
    return DeoptimizationRequest::kNothing;
  }
  const Breakpoint* first_breakpoint;
  {
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    first_breakpoint = FindFirstBreakpointForMethod(m);
    *existing_brkpt = first_breakpoint;
  }

  if (first_breakpoint == nullptr) {
    // There is no breakpoint on this method yet: we need to deoptimize. If this method is a
    // default method, we deoptimize everything; otherwise we deoptimize only this method. We
    // must deoptimize everything for default methods because we do not know where all of their
    // copies are used, so some of the copies could otherwise be missed.
    // TODO: Deoptimizing on default methods might not be necessary in all cases.
    bool need_full_deoptimization = m->IsDefault();
    if (need_full_deoptimization) {
      VLOG(jdwp) << "Need full deoptimization because of copying of method "
                 << ArtMethod::PrettyMethod(m);
      return DeoptimizationRequest::kFullDeoptimization;
    } else {
      // We don't need to deoptimize if the method has not been compiled.
      const bool is_compiled = m->HasAnyCompiledCode();
      if (is_compiled) {
        VLOG(jdwp) << "Need selective deoptimization for compiled method "
                   << ArtMethod::PrettyMethod(m);
        return DeoptimizationRequest::kSelectiveDeoptimization;
      } else {
        // Method is not compiled: we don't need to deoptimize.
        VLOG(jdwp) << "No need for deoptimization for non-compiled method "
                   << ArtMethod::PrettyMethod(m);
        return DeoptimizationRequest::kNothing;
      }
    }
  } else {
    // There is at least one breakpoint for this method: we don't need to deoptimize.
    // Let's check that all breakpoints are configured the same way for deoptimization.
    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
    if (kIsDebugBuild) {
      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
      SanityCheckExistingBreakpoints(m, deoptimization_kind);
    }
    return DeoptimizationRequest::kNothing;
  }
}

// Installs a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to deoptimize.
void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
  Thread* const self = Thread::Current();
  ArtMethod* m = FromMethodId(location->method_id);
  DCHECK(m != nullptr) << "No method for method id " << location->method_id;

  const Breakpoint* existing_breakpoint = nullptr;
  const DeoptimizationRequest::Kind deoptimization_kind =
      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
  req->SetKind(deoptimization_kind);
  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
    req->SetMethod(m);
  } else {
    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
    req->SetMethod(nullptr);
  }

  {
    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
    // If there is at least one existing breakpoint on the same method, the new breakpoint
    // must have the same deoptimization kind as the existing breakpoint(s).
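    // Reusing the existing breakpoints' kind keeps the bookkeeping consistent, so that
    // UnwatchLocation() below can pick the matching undeoptimization once the last breakpoint
    // on the method is removed.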
3509 DeoptimizationRequest::Kind breakpoint_deoptimization_kind; 3510 if (existing_breakpoint != nullptr) { 3511 breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind(); 3512 } else { 3513 breakpoint_deoptimization_kind = deoptimization_kind; 3514 } 3515 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind)); 3516 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " 3517 << gBreakpoints[gBreakpoints.size() - 1]; 3518 } 3519 } 3520 3521 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization 3522 // request if we need to undeoptimize. 3523 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { 3524 WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 3525 ArtMethod* m = FromMethodId(location->method_id); 3526 DCHECK(m != nullptr) << "No method for method id " << location->method_id; 3527 DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing; 3528 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { 3529 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].IsInMethod(m)) { 3530 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i]; 3531 deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind(); 3532 DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization, 3533 Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3534 gBreakpoints.erase(gBreakpoints.begin() + i); 3535 break; 3536 } 3537 } 3538 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); 3539 if (existing_breakpoint == nullptr) { 3540 // There is no more breakpoint on this method: we need to undeoptimize. 3541 if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) { 3542 // This method required full deoptimization: we need to undeoptimize everything. 3543 req->SetKind(DeoptimizationRequest::kFullUndeoptimization); 3544 req->SetMethod(nullptr); 3545 } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) { 3546 // This method required selective deoptimization: we need to undeoptimize only that method. 3547 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization); 3548 req->SetMethod(m); 3549 } else { 3550 // This method had no need for deoptimization: do nothing. 3551 CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing); 3552 req->SetKind(DeoptimizationRequest::kNothing); 3553 req->SetMethod(nullptr); 3554 } 3555 } else { 3556 // There is at least one breakpoint for this method: we don't need to undeoptimize. 3557 req->SetKind(DeoptimizationRequest::kNothing); 3558 req->SetMethod(nullptr); 3559 if (kIsDebugBuild) { 3560 SanityCheckExistingBreakpoints(m, deoptimization_kind); 3561 } 3562 } 3563 } 3564 3565 bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) { 3566 const SingleStepControl* const ssc = thread->GetSingleStepControl(); 3567 if (ssc == nullptr) { 3568 // If we are not single-stepping, then we don't have to force interpreter. 3569 return false; 3570 } 3571 if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) { 3572 // If we are in interpreter only mode, then we don't have to force interpreter. 3573 return false; 3574 } 3575 3576 if (!m->IsNative() && !m->IsProxyMethod()) { 3577 // If we want to step into a method, then we have to force interpreter on that call. 
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
  }
  return false;
}

bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we want to step into a method, then we have to force interpreter on that call.
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
    // If we are stepping out from a static initializer, by issuing a step
    // in or step over, that was implicitly invoked by calling a static method,
    // then we need to step into that method. Having a lower stack depth than
    // the one the single step control has indicates that the step originates
    // from the static initializer.
    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // There are cases where we have to force interpreter on deoptimized methods,
  // because in some cases the call will not be performed by invoking an entry
  // point that has been replaced by the deoptimization, but instead by directly
  // invoking the compiled code of the method, for example.
  return instrumentation->IsDeoptimized(m);
}

bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  // The upcall can be null and in that case we don't need to do anything.
  if (m == nullptr) {
    return false;
  }
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we are stepping out from a static initializer, by issuing a step
    // out, that was implicitly invoked by calling a static method, then we
    // need to step into the caller of that method. Having a lower stack
    // depth than the one the single step control has indicates that the
    // step originates from the static initializer.
    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // If we are returning from a static initializer, that was implicitly
  // invoked by calling a static method and the caller is deoptimized,
  // then we have to deoptimize the stack without forcing interpreter
  // on the static method that was called originally. This problem can
  // be solved easily by forcing instrumentation on the called method,
  // because the instrumentation exit hook will recognise the need for
  // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
3653 return instrumentation->IsDeoptimized(m); 3654 } 3655 3656 bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) { 3657 // The upcall can be null and in that case we don't need to do anything. 3658 if (m == nullptr) { 3659 return false; 3660 } 3661 instrumentation::Instrumentation* const instrumentation = 3662 Runtime::Current()->GetInstrumentation(); 3663 // If we are in interpreter only mode, then we don't have to force interpreter. 3664 if (instrumentation->InterpretOnly()) { 3665 return false; 3666 } 3667 // We can only interpret pure Java method. 3668 if (m->IsNative() || m->IsProxyMethod()) { 3669 return false; 3670 } 3671 const SingleStepControl* const ssc = thread->GetSingleStepControl(); 3672 if (ssc != nullptr) { 3673 // The debugger is not interested in what is happening under the level 3674 // of the step, thus we only force interpreter when we are not below of 3675 // the step. 3676 if (ssc->GetStackDepth() >= GetStackDepth(thread)) { 3677 return true; 3678 } 3679 } 3680 if (thread->HasDebuggerShadowFrames()) { 3681 // We need to deoptimize the stack for the exception handling flow so that 3682 // we don't miss any deoptimization that should be done when there are 3683 // debugger shadow frames. 3684 return true; 3685 } 3686 // We have to require stack deoptimization if the upcall is deoptimized. 3687 return instrumentation->IsDeoptimized(m); 3688 } 3689 3690 class NeedsDeoptimizationVisitor : public StackVisitor { 3691 public: 3692 explicit NeedsDeoptimizationVisitor(Thread* self) 3693 REQUIRES_SHARED(Locks::mutator_lock_) 3694 : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 3695 needs_deoptimization_(false) {} 3696 3697 bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { 3698 // The visitor is meant to be used when handling exception from compiled code only. 3699 CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: " 3700 << ArtMethod::PrettyMethod(GetMethod()); 3701 ArtMethod* method = GetMethod(); 3702 if (method == nullptr) { 3703 // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment) 3704 // so we can stop the visit. 3705 DCHECK(!needs_deoptimization_); 3706 return false; 3707 } 3708 if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) { 3709 // We found a compiled frame in the stack but instrumentation is set to interpret 3710 // everything: we need to deoptimize. 3711 needs_deoptimization_ = true; 3712 return false; 3713 } 3714 if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) { 3715 // We found a deoptimized method in the stack. 3716 needs_deoptimization_ = true; 3717 return false; 3718 } 3719 ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId()); 3720 if (frame != nullptr) { 3721 // The debugger allocated a ShadowFrame to update a variable in the stack: we need to 3722 // deoptimize the stack to execute (and deallocate) this frame. 3723 needs_deoptimization_ = true; 3724 return false; 3725 } 3726 return true; 3727 } 3728 3729 bool NeedsDeoptimization() const { 3730 return needs_deoptimization_; 3731 } 3732 3733 private: 3734 // Do we need to deoptimize the stack? 3735 bool needs_deoptimization_; 3736 3737 DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor); 3738 }; 3739 3740 // Do we need to deoptimize the stack to handle an exception? 
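// Returns true when the catch handler must run under the interpreter: either the thread is
// being single-stepped (so we stop in the handler), or some frame between the throw point and
// the handler needs deoptimization according to the visitor above.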
3741 bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) { 3742 const SingleStepControl* const ssc = thread->GetSingleStepControl(); 3743 if (ssc != nullptr) { 3744 // We deopt to step into the catch handler. 3745 return true; 3746 } 3747 // Deoptimization is required if at least one method in the stack needs it. However we 3748 // skip frames that will be unwound (thus not executed). 3749 NeedsDeoptimizationVisitor visitor(thread); 3750 visitor.WalkStack(true); // includes upcall. 3751 return visitor.NeedsDeoptimization(); 3752 } 3753 3754 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't 3755 // cause suspension if the thread is the current thread. 3756 class ScopedDebuggerThreadSuspension { 3757 public: 3758 ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id) 3759 REQUIRES(!Locks::thread_list_lock_) 3760 REQUIRES_SHARED(Locks::mutator_lock_) : 3761 thread_(nullptr), 3762 error_(JDWP::ERR_NONE), 3763 self_suspend_(false), 3764 other_suspend_(false) { 3765 ScopedObjectAccessUnchecked soa(self); 3766 thread_ = DecodeThread(soa, thread_id, &error_); 3767 if (error_ == JDWP::ERR_NONE) { 3768 if (thread_ == soa.Self()) { 3769 self_suspend_ = true; 3770 } else { 3771 Thread* suspended_thread; 3772 { 3773 ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension); 3774 jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id); 3775 bool timed_out; 3776 ThreadList* const thread_list = Runtime::Current()->GetThreadList(); 3777 suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, 3778 /* request_suspension */ true, 3779 SuspendReason::kForDebugger, 3780 &timed_out); 3781 } 3782 if (suspended_thread == nullptr) { 3783 // Thread terminated from under us while suspending. 3784 error_ = JDWP::ERR_INVALID_THREAD; 3785 } else { 3786 CHECK_EQ(suspended_thread, thread_); 3787 other_suspend_ = true; 3788 } 3789 } 3790 } 3791 } 3792 3793 Thread* GetThread() const { 3794 return thread_; 3795 } 3796 3797 JDWP::JdwpError GetError() const { 3798 return error_; 3799 } 3800 3801 ~ScopedDebuggerThreadSuspension() { 3802 if (other_suspend_) { 3803 bool resumed = Runtime::Current()->GetThreadList()->Resume(thread_, 3804 SuspendReason::kForDebugger); 3805 DCHECK(resumed); 3806 } 3807 } 3808 3809 private: 3810 Thread* thread_; 3811 JDWP::JdwpError error_; 3812 bool self_suspend_; 3813 bool other_suspend_; 3814 }; 3815 3816 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size, 3817 JDWP::JdwpStepDepth step_depth) { 3818 Thread* self = Thread::Current(); 3819 ScopedDebuggerThreadSuspension sts(self, thread_id); 3820 if (sts.GetError() != JDWP::ERR_NONE) { 3821 return sts.GetError(); 3822 } 3823 3824 // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently 3825 // is for step-out. 3826 struct SingleStepStackVisitor : public StackVisitor { 3827 explicit SingleStepStackVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) 3828 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 3829 stack_depth(0), 3830 method(nullptr), 3831 line_number(-1) {} 3832 3833 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 3834 // annotalysis. 
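    // Record the first non-runtime frame as the current method and line, and count every Java
    // frame to establish the depth that the SD_OVER/SD_OUT comparisons use later.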
3835 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 3836 ArtMethod* m = GetMethod(); 3837 if (!m->IsRuntimeMethod()) { 3838 ++stack_depth; 3839 if (method == nullptr) { 3840 const DexFile* dex_file = m->GetDexFile(); 3841 method = m; 3842 if (dex_file != nullptr) { 3843 line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc()); 3844 } 3845 } 3846 } 3847 return true; 3848 } 3849 3850 int stack_depth; 3851 ArtMethod* method; 3852 int32_t line_number; 3853 }; 3854 3855 Thread* const thread = sts.GetThread(); 3856 SingleStepStackVisitor visitor(thread); 3857 visitor.WalkStack(); 3858 3859 // Find the dex_pc values that correspond to the current line, for line-based single-stepping. 3860 struct DebugCallbackContext { 3861 DebugCallbackContext(SingleStepControl* single_step_control_cb, 3862 int32_t line_number_cb, uint32_t num_insns_in_code_units) 3863 : single_step_control_(single_step_control_cb), line_number_(line_number_cb), 3864 num_insns_in_code_units_(num_insns_in_code_units), last_pc_valid(false), last_pc(0) { 3865 } 3866 3867 static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) { 3868 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context); 3869 if (static_cast<int32_t>(entry.line_) == context->line_number_) { 3870 if (!context->last_pc_valid) { 3871 // Everything from this address until the next line change is ours. 3872 context->last_pc = entry.address_; 3873 context->last_pc_valid = true; 3874 } 3875 // Otherwise, if we're already in a valid range for this line, 3876 // just keep going (shouldn't really happen)... 3877 } else if (context->last_pc_valid) { // and the line number is new 3878 // Add everything from the last entry up until here to the set 3879 for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) { 3880 context->single_step_control_->AddDexPc(dex_pc); 3881 } 3882 context->last_pc_valid = false; 3883 } 3884 return false; // There may be multiple entries for any given line. 3885 } 3886 3887 ~DebugCallbackContext() { 3888 // If the line number was the last in the position table... 3889 if (last_pc_valid) { 3890 for (uint32_t dex_pc = last_pc; dex_pc < num_insns_in_code_units_; ++dex_pc) { 3891 single_step_control_->AddDexPc(dex_pc); 3892 } 3893 } 3894 } 3895 3896 SingleStepControl* const single_step_control_; 3897 const int32_t line_number_; 3898 const uint32_t num_insns_in_code_units_; 3899 bool last_pc_valid; 3900 uint32_t last_pc; 3901 }; 3902 3903 // Allocate single step. 3904 SingleStepControl* single_step_control = 3905 new (std::nothrow) SingleStepControl(step_size, step_depth, 3906 visitor.stack_depth, visitor.method); 3907 if (single_step_control == nullptr) { 3908 LOG(ERROR) << "Failed to allocate SingleStepControl"; 3909 return JDWP::ERR_OUT_OF_MEMORY; 3910 } 3911 3912 ArtMethod* m = single_step_control->GetMethod(); 3913 const int32_t line_number = visitor.line_number; 3914 // Note: if the thread is not running Java code (pure native thread), there is no "current" 3915 // method on the stack (and no line number either). 3916 if (m != nullptr && !m->IsNative()) { 3917 CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo()); 3918 DebugCallbackContext context(single_step_control, line_number, accessor.InsnsSizeInCodeUnits()); 3919 m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), 3920 DebugCallbackContext::Callback, 3921 &context); 3922 } 3923 3924 // Activate single-step in the thread. 
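  // The thread takes ownership of the SingleStepControl; it remains installed until
  // UnconfigureStep() below deactivates (and frees) it.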
3925 thread->ActivateSingleStepControl(single_step_control); 3926 3927 if (VLOG_IS_ON(jdwp)) { 3928 VLOG(jdwp) << "Single-step thread: " << *thread; 3929 VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize(); 3930 VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth(); 3931 VLOG(jdwp) << "Single-step current method: " 3932 << ArtMethod::PrettyMethod(single_step_control->GetMethod()); 3933 VLOG(jdwp) << "Single-step current line: " << line_number; 3934 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth(); 3935 VLOG(jdwp) << "Single-step dex_pc values:"; 3936 for (uint32_t dex_pc : single_step_control->GetDexPcs()) { 3937 VLOG(jdwp) << StringPrintf(" %#x", dex_pc); 3938 } 3939 } 3940 3941 return JDWP::ERR_NONE; 3942 } 3943 3944 void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) { 3945 ScopedObjectAccessUnchecked soa(Thread::Current()); 3946 JDWP::JdwpError error; 3947 Thread* thread = DecodeThread(soa, thread_id, &error); 3948 if (error == JDWP::ERR_NONE) { 3949 thread->DeactivateSingleStepControl(); 3950 } 3951 } 3952 3953 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) { 3954 switch (tag) { 3955 default: 3956 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag); 3957 UNREACHABLE(); 3958 3959 // Primitives. 3960 case JDWP::JT_BYTE: return 'B'; 3961 case JDWP::JT_CHAR: return 'C'; 3962 case JDWP::JT_FLOAT: return 'F'; 3963 case JDWP::JT_DOUBLE: return 'D'; 3964 case JDWP::JT_INT: return 'I'; 3965 case JDWP::JT_LONG: return 'J'; 3966 case JDWP::JT_SHORT: return 'S'; 3967 case JDWP::JT_VOID: return 'V'; 3968 case JDWP::JT_BOOLEAN: return 'Z'; 3969 3970 // Reference types. 3971 case JDWP::JT_ARRAY: 3972 case JDWP::JT_OBJECT: 3973 case JDWP::JT_STRING: 3974 case JDWP::JT_THREAD: 3975 case JDWP::JT_THREAD_GROUP: 3976 case JDWP::JT_CLASS_LOADER: 3977 case JDWP::JT_CLASS_OBJECT: 3978 return 'L'; 3979 } 3980 } 3981 3982 JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id, 3983 JDWP::ObjectId object_id, JDWP::RefTypeId class_id, 3984 JDWP::MethodId method_id, uint32_t arg_count, 3985 uint64_t arg_values[], JDWP::JdwpTag* arg_types, 3986 uint32_t options) { 3987 Thread* const self = Thread::Current(); 3988 CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread"; 3989 const bool resume_all_threads = ((options & JDWP::INVOKE_SINGLE_THREADED) == 0); 3990 3991 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 3992 Thread* targetThread = nullptr; 3993 { 3994 ScopedObjectAccessUnchecked soa(self); 3995 JDWP::JdwpError error; 3996 targetThread = DecodeThread(soa, thread_id, &error); 3997 if (error != JDWP::ERR_NONE) { 3998 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id; 3999 return error; 4000 } 4001 if (targetThread->GetInvokeReq() != nullptr) { 4002 // Thread is already invoking a method on behalf of the debugger. 4003 LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread; 4004 return JDWP::ERR_ALREADY_INVOKING; 4005 } 4006 if (!targetThread->IsReadyForDebugInvoke()) { 4007 // Thread is not suspended by an event so it cannot invoke a method. 4008 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread; 4009 return JDWP::ERR_INVALID_THREAD; 4010 } 4011 4012 /* 4013 * According to the JDWP specs, we are expected to resume all threads (or only the 4014 * target thread) once. 
So if a thread has been suspended more than once (either by
     * the debugger for an event or by the runtime for GC), it will remain suspended before
     * the invoke is executed. This means the debugger is responsible for properly resuming all
     * the threads it has suspended so the target thread can execute the method.
     *
     * However, for compatibility reasons with older debuggers (like Eclipse), we
     * fully resume all threads (by canceling *all* debugger suspensions) when the debugger
     * wants us to resume all threads. This is to avoid ending up in a deadlock situation.
     *
     * On the other hand, if we are asked to only resume the target thread, then we follow the
     * JDWP specs by resuming that thread only once. This means the thread will remain suspended
     * if it has been suspended more than once before the invoke (and again, it is the
     * debugger's responsibility to properly resume that thread before invoking a method).
     */
    int suspend_count;
    {
      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
      suspend_count = targetThread->GetSuspendCount();
    }
    if (suspend_count > 1 && resume_all_threads) {
      // The target thread will remain suspended even after we resume it. Let's emit a warning
      // to indicate the invoke won't be executed until the thread is resumed.
      LOG(WARNING) << *targetThread << " suspended more than once (suspend count == "
                   << suspend_count << "). This thread will invoke the method only once "
                   << "it is fully resumed.";
    }

    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    // Check that the thread id refers to a valid object (the result is otherwise unused).
    gRegistry->Get<mirror::Object*>(thread_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    mirror::Class* c = DecodeClass(class_id, &error);
    if (c == nullptr) {
      return error;
    }

    ArtMethod* m = FromMethodId(method_id);
    if (m->IsStatic() != (receiver == nullptr)) {
      return JDWP::ERR_INVALID_METHODID;
    }
    if (m->IsStatic()) {
      if (m->GetDeclaringClass() != c) {
        return JDWP::ERR_INVALID_METHODID;
      }
    } else {
      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
        return JDWP::ERR_INVALID_METHODID;
      }
    }

    // Check that the argument list matches the method.
    uint32_t shorty_len = 0;
    const char* shorty = m->GetShorty(&shorty_len);
    if (shorty_len - 1 != arg_count) {
      return JDWP::ERR_ILLEGAL_ARGUMENT;
    }

    {
      StackHandleScope<2> hs(soa.Self());
      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
      const DexFile::TypeList* types = m->GetParameterTypeList();
      for (size_t i = 0; i < arg_count; ++i) {
        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
          return JDWP::ERR_ILLEGAL_ARGUMENT;
        }

        if (shorty[i + 1] == 'L') {
          // Did we really get an argument of an appropriate reference type?
          ObjPtr<mirror::Class> parameter_type =
              m->ResolveClassFromTypeIndex(types->GetTypeItem(i).type_idx_);
          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
          if (error != JDWP::ERR_NONE) {
            return JDWP::ERR_INVALID_OBJECT;
          }
          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
            return JDWP::ERR_ILLEGAL_ARGUMENT;
          }

          // Turn the on-the-wire ObjectId into a jobject.
          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
          v.l = gRegistry->GetJObject(arg_values[i]);
        }
      }
    }

    // Allocate a DebugInvokeReq.
    DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
                                                            options, arg_values, arg_count);
    if (req == nullptr) {
      LOG(ERROR) << "Failed to allocate DebugInvokeReq";
      return JDWP::ERR_OUT_OF_MEMORY;
    }

    // Attach the DebugInvokeReq to the target thread so it executes the method when
    // it is resumed. Once the invocation completes, the target thread will delete it before
    // suspending itself (see ThreadList::SuspendSelfForDebugger).
    targetThread->SetDebugInvokeReq(req);
  }

  // The fact that we've released the thread list lock is a bit risky -- if the thread goes
  // away we're sitting high and dry -- but we must release this before the
  // UndoDebuggerSuspensions call.
  if (resume_all_threads) {
    VLOG(jdwp) << " Resuming all threads";
    thread_list->UndoDebuggerSuspensions();
  } else {
    VLOG(jdwp) << " Resuming event thread only";
    bool resumed = thread_list->Resume(targetThread, SuspendReason::kForDebugger);
    DCHECK(resumed);
  }

  return JDWP::ERR_NONE;
}

void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
  Thread* const self = Thread::Current();
  CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";

  ScopedObjectAccess soa(self);

  // We can be called while an exception is pending. We need
  // to preserve that across the method invocation.
  StackHandleScope<1> hs(soa.Self());
  Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
  soa.Self()->ClearException();

  // Execute the method, then send the reply to the debugger.
  ExecuteMethodWithoutPendingException(soa, pReq);

  // If an exception was pending before the invoke, restore it now.
  if (old_exception != nullptr) {
    soa.Self()->SetException(old_exception.Get());
  }
}

// Helper function: write a variable-width value into the output buffer.
static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
  switch (width) {
    case 1:
      expandBufAdd1(pReply, value);
      break;
    case 2:
      expandBufAdd2BE(pReply, value);
      break;
    case 4:
      expandBufAdd4BE(pReply, value);
      break;
    case 8:
      expandBufAdd8BE(pReply, value);
      break;
    default:
      LOG(FATAL) << width;
      UNREACHABLE();
  }
}

void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
  soa.Self()->AssertNoPendingException();

  // Translate the method through the vtable, unless the debugger wants to suppress it.
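  // That is, unless the debugger requested INVOKE_NONVIRTUAL, the method id it sent is
  // re-resolved against the receiver's runtime class, mirroring invoke-virtual dispatch.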
4181 ArtMethod* m = pReq->method; 4182 PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); 4183 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) { 4184 ArtMethod* actual_method = 4185 pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size); 4186 if (actual_method != m) { 4187 VLOG(jdwp) << "ExecuteMethod translated " << ArtMethod::PrettyMethod(m) 4188 << " to " << ArtMethod::PrettyMethod(actual_method); 4189 m = actual_method; 4190 } 4191 } 4192 VLOG(jdwp) << "ExecuteMethod " << ArtMethod::PrettyMethod(m) 4193 << " receiver=" << pReq->receiver.Read() 4194 << " arg_count=" << pReq->arg_count; 4195 CHECK(m != nullptr); 4196 4197 static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes."); 4198 4199 // Invoke the method. 4200 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read())); 4201 JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m), 4202 reinterpret_cast<jvalue*>(pReq->arg_values.get())); 4203 4204 // Prepare JDWP ids for the reply. 4205 JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty()); 4206 const bool is_object_result = (result_tag == JDWP::JT_OBJECT); 4207 StackHandleScope<3> hs(soa.Self()); 4208 Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr); 4209 Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException()); 4210 soa.Self()->ClearException(); 4211 4212 if (!IsDebuggerActive()) { 4213 // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply 4214 // because it won't be sent either. 4215 return; 4216 } 4217 4218 JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception); 4219 uint64_t result_value = 0; 4220 if (exceptionObjectId != 0) { 4221 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get() 4222 << " " << exception->Dump(); 4223 result_value = 0; 4224 } else if (is_object_result) { 4225 /* if no exception was thrown, examine object result more closely */ 4226 JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get()); 4227 if (new_tag != result_tag) { 4228 VLOG(jdwp) << " JDWP promoted result from " << result_tag << " to " << new_tag; 4229 result_tag = new_tag; 4230 } 4231 4232 // Register the object in the registry and reference its ObjectId. This ensures 4233 // GC safety and prevents from accessing stale reference if the object is moved. 4234 result_value = gRegistry->Add(object_result.Get()); 4235 } else { 4236 // Primitive result. 4237 DCHECK(IsPrimitiveTag(result_tag)); 4238 result_value = result.GetJ(); 4239 } 4240 const bool is_constructor = m->IsConstructor() && !m->IsStatic(); 4241 if (is_constructor) { 4242 // If we invoked a constructor (which actually returns void), return the receiver, 4243 // unless we threw, in which case we return null. 4244 DCHECK_EQ(JDWP::JT_VOID, result_tag); 4245 if (exceptionObjectId == 0) { 4246 if (m->GetDeclaringClass()->IsStringClass()) { 4247 // For string constructors, the new string is remapped to the receiver (stored in ref). 4248 Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get())); 4249 result_value = gRegistry->Add(decoded_ref); 4250 result_tag = TagFromObject(soa, decoded_ref.Get()); 4251 } else { 4252 // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the 4253 // object registry. 
4254 result_value = GetObjectRegistry()->Add(pReq->receiver.Read()); 4255 result_tag = TagFromObject(soa, pReq->receiver.Read()); 4256 } 4257 } else { 4258 result_value = 0; 4259 result_tag = JDWP::JT_OBJECT; 4260 } 4261 } 4262 4263 // Suspend other threads if the invoke is not single-threaded. 4264 if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) { 4265 ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension); 4266 // Avoid a deadlock between GC and debugger where GC gets suspended during GC. b/25800335. 4267 gc::ScopedGCCriticalSection gcs(soa.Self(), gc::kGcCauseDebugger, gc::kCollectorTypeDebugger); 4268 VLOG(jdwp) << " Suspending all threads"; 4269 Runtime::Current()->GetThreadList()->SuspendAllForDebugger(); 4270 } 4271 4272 VLOG(jdwp) << " --> returned " << result_tag 4273 << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value, 4274 exceptionObjectId); 4275 4276 // Show detailed debug output. 4277 if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) { 4278 if (result_value != 0) { 4279 if (VLOG_IS_ON(jdwp)) { 4280 std::string result_string; 4281 JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string); 4282 CHECK_EQ(error, JDWP::ERR_NONE); 4283 VLOG(jdwp) << " string '" << result_string << "'"; 4284 } 4285 } else { 4286 VLOG(jdwp) << " string (null)"; 4287 } 4288 } 4289 4290 // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread 4291 // is ready to suspend. 4292 BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId); 4293 } 4294 4295 void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag, 4296 uint64_t result_value, JDWP::ObjectId exception) { 4297 // Make room for the JDWP header since we do not know the size of the reply yet. 4298 JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen); 4299 4300 size_t width = GetTagWidth(result_tag); 4301 JDWP::expandBufAdd1(pReply, result_tag); 4302 if (width != 0) { 4303 WriteValue(pReply, width, result_value); 4304 } 4305 JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT); 4306 JDWP::expandBufAddObjectId(pReply, exception); 4307 4308 // Now we know the size, we can complete the JDWP header. 4309 uint8_t* buf = expandBufGetBuffer(pReply); 4310 JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply)); 4311 JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id); 4312 JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply); // flags 4313 JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE); 4314 } 4315 4316 void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) { 4317 CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread"; 4318 4319 JDWP::ExpandBuf* const pReply = pReq->reply; 4320 CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq"; 4321 4322 // We need to prevent other threads (including JDWP thread) from interacting with the debugger 4323 // while we send the reply but are not yet suspended. The JDWP token will be released just before 4324 // we suspend ourself again (see ThreadList::SuspendSelfForDebugger). 4325 gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id); 4326 4327 // Send the reply unless the debugger detached before the completion of the method. 
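  // For reference, the reply assembled by BuildInvokeReply above is laid out as
  //   [u4] length  [u4] request id  [u1] flags (reply)  [u2] error code
  //   [u1] result tag  [result value, width given by the tag; empty for void]
  //   [u1] JT_OBJECT tag  [exception object id]
  // where object ids are 8 bytes in this runtime's JDWP implementation.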
4328 if (IsDebuggerActive()) { 4329 const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen; 4330 VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)", 4331 pReq->request_id, replyDataLength); 4332 4333 gJdwpState->SendRequest(pReply); 4334 } else { 4335 VLOG(jdwp) << "Not sending invoke reply because debugger detached"; 4336 } 4337 } 4338 4339 bool Dbg::DdmHandleChunk(JNIEnv* env, 4340 uint32_t type, 4341 const ArrayRef<const jbyte>& data, 4342 /*out*/uint32_t* out_type, 4343 /*out*/std::vector<uint8_t>* out_data) { 4344 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(data.size())); 4345 if (dataArray.get() == nullptr) { 4346 LOG(WARNING) << "byte[] allocation failed: " << data.size(); 4347 env->ExceptionClear(); 4348 return false; 4349 } 4350 env->SetByteArrayRegion(dataArray.get(), 4351 0, 4352 data.size(), 4353 reinterpret_cast<const jbyte*>(data.data())); 4354 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)". 4355 ScopedLocalRef<jobject> chunk( 4356 env, 4357 env->CallStaticObjectMethod( 4358 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 4359 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch, 4360 type, dataArray.get(), 0, data.size())); 4361 if (env->ExceptionCheck()) { 4362 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type); 4363 env->ExceptionDescribe(); 4364 env->ExceptionClear(); 4365 return false; 4366 } 4367 4368 if (chunk.get() == nullptr) { 4369 return false; 4370 } 4371 4372 /* 4373 * Pull the pieces out of the chunk. We copy the results into a 4374 * newly-allocated buffer that the caller can free. We don't want to 4375 * continue using the Chunk object because nothing has a reference to it. 4376 * 4377 * We could avoid this by returning type/data/offset/length and having 4378 * the caller be aware of the object lifetime issues, but that 4379 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work 4380 * if we have responses for multiple chunks. 4381 * 4382 * So we're pretty much stuck with copying data around multiple times. 4383 */ 4384 ScopedLocalRef<jbyteArray> replyData( 4385 env, 4386 reinterpret_cast<jbyteArray>( 4387 env->GetObjectField( 4388 chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data))); 4389 jint offset = env->GetIntField(chunk.get(), 4390 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset); 4391 jint length = env->GetIntField(chunk.get(), 4392 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length); 4393 *out_type = env->GetIntField(chunk.get(), 4394 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type); 4395 4396 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", 4397 type, 4398 replyData.get(), 4399 offset, 4400 length); 4401 out_data->resize(length); 4402 env->GetByteArrayRegion(replyData.get(), 4403 offset, 4404 length, 4405 reinterpret_cast<jbyte*>(out_data->data())); 4406 4407 if (env->ExceptionCheck()) { 4408 LOG(INFO) << StringPrintf("Exception thrown when reading response data from dispatcher 0x%08x", 4409 type); 4410 env->ExceptionDescribe(); 4411 env->ExceptionClear(); 4412 return false; 4413 } 4414 4415 return true; 4416 } 4417 4418 /* 4419 * "request" contains a full JDWP packet, possibly with multiple chunks. We 4420 * need to process each, accumulate the replies, and ship the whole thing 4421 * back. 4422 * 4423 * Returns "true" if we have a reply. 
The reply buffer is newly allocated, 4424 * and includes the chunk type/length, followed by the data. 4425 * 4426 * OLD-TODO: we currently assume that the request and reply include a single 4427 * chunk. If this becomes inconvenient we will need to adapt. 4428 */ 4429 bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) { 4430 Thread* self = Thread::Current(); 4431 JNIEnv* env = self->GetJniEnv(); 4432 4433 uint32_t type = request->ReadUnsigned32("type"); 4434 uint32_t length = request->ReadUnsigned32("length"); 4435 4436 // Create a byte[] corresponding to 'request'. 4437 size_t request_length = request->size(); 4438 // Run through and find all chunks. [Currently just find the first.] 4439 if (length != request_length) { 4440 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length); 4441 return false; 4442 } 4443 4444 ArrayRef<const jbyte> data(reinterpret_cast<const jbyte*>(request->data()), request_length); 4445 std::vector<uint8_t> out_data; 4446 uint32_t out_type = 0; 4447 request->Skip(request_length); 4448 if (!DdmHandleChunk(env, type, data, &out_type, &out_data) || out_data.empty()) { 4449 return false; 4450 } 4451 const uint32_t kDdmHeaderSize = 8; 4452 *pReplyLen = out_data.size() + kDdmHeaderSize; 4453 *pReplyBuf = new uint8_t[out_data.size() + kDdmHeaderSize]; 4454 memcpy((*pReplyBuf) + kDdmHeaderSize, out_data.data(), out_data.size()); 4455 JDWP::Set4BE(*pReplyBuf, out_type); 4456 JDWP::Set4BE((*pReplyBuf) + 4, static_cast<uint32_t>(out_data.size())); 4457 VLOG(jdwp) 4458 << StringPrintf("dvmHandleDdm returning type=%.4s", reinterpret_cast<char*>(*pReplyBuf)) 4459 << "0x" << std::hex << reinterpret_cast<uintptr_t>(*pReplyBuf) << std::dec 4460 << " len= " << out_data.size(); 4461 return true; 4462 } 4463 4464 void Dbg::DdmBroadcast(bool connect) { 4465 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "..."; 4466 4467 Thread* self = Thread::Current(); 4468 if (self->GetState() != kRunnable) { 4469 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); 4470 /* try anyway? */ 4471 } 4472 4473 JNIEnv* env = self->GetJniEnv(); 4474 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/; 4475 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 4476 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast, 4477 event); 4478 if (env->ExceptionCheck()) { 4479 LOG(ERROR) << "DdmServer.broadcast " << event << " failed"; 4480 env->ExceptionDescribe(); 4481 env->ExceptionClear(); 4482 } 4483 } 4484 4485 void Dbg::DdmConnected() { 4486 Dbg::DdmBroadcast(true); 4487 } 4488 4489 void Dbg::DdmDisconnected() { 4490 Dbg::DdmBroadcast(false); 4491 gDdmThreadNotification = false; 4492 } 4493 4494 /* 4495 * Send a notification when a thread starts, stops, or changes its name. 4496 * 4497 * Because we broadcast the full set of threads when the notifications are 4498 * first enabled, it's possible for "thread" to be actively executing. 
4499 */ 4500 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { 4501 if (!gDdmThreadNotification) { 4502 return; 4503 } 4504 4505 RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks(); 4506 if (type == CHUNK_TYPE("THDE")) { 4507 uint8_t buf[4]; 4508 JDWP::Set4BE(&buf[0], t->GetThreadId()); 4509 cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf)); 4510 } else { 4511 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; 4512 ScopedObjectAccessUnchecked soa(Thread::Current()); 4513 StackHandleScope<1> hs(soa.Self()); 4514 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName())); 4515 size_t char_count = (name != nullptr) ? name->GetLength() : 0; 4516 const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr; 4517 bool is_compressed = (name != nullptr) ? name->IsCompressed() : false; 4518 4519 std::vector<uint8_t> bytes; 4520 JDWP::Append4BE(bytes, t->GetThreadId()); 4521 if (is_compressed) { 4522 const uint8_t* chars_compressed = name->GetValueCompressed(); 4523 JDWP::AppendUtf16CompressedBE(bytes, chars_compressed, char_count); 4524 } else { 4525 JDWP::AppendUtf16BE(bytes, chars, char_count); 4526 } 4527 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2); 4528 cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes)); 4529 } 4530 } 4531 4532 void Dbg::DdmSetThreadNotification(bool enable) { 4533 // Enable/disable thread notifications. 4534 gDdmThreadNotification = enable; 4535 if (enable) { 4536 // Suspend the VM then post thread start notifications for all threads. Threads attaching will 4537 // see a suspension in progress and block until that ends. They then post their own start 4538 // notification. 4539 SuspendVM(); 4540 std::list<Thread*> threads; 4541 Thread* self = Thread::Current(); 4542 { 4543 MutexLock mu(self, *Locks::thread_list_lock_); 4544 threads = Runtime::Current()->GetThreadList()->GetList(); 4545 } 4546 { 4547 ScopedObjectAccess soa(self); 4548 for (Thread* thread : threads) { 4549 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR")); 4550 } 4551 } 4552 ResumeVM(); 4553 } 4554 } 4555 4556 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) { 4557 if (IsDebuggerActive()) { 4558 gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR")); 4559 } 4560 Dbg::DdmSendThreadNotification(t, type); 4561 } 4562 4563 void Dbg::PostThreadStart(Thread* t) { 4564 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR")); 4565 } 4566 4567 void Dbg::PostThreadDeath(Thread* t) { 4568 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE")); 4569 } 4570 4571 JDWP::JdwpState* Dbg::GetJdwpState() { 4572 return gJdwpState; 4573 } 4574 4575 int Dbg::DdmHandleHpifChunk(HpifWhen when) { 4576 if (when == HPIF_WHEN_NOW) { 4577 DdmSendHeapInfo(when); 4578 return true; 4579 } 4580 4581 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) { 4582 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when); 4583 return false; 4584 } 4585 4586 gDdmHpifWhen = when; 4587 return true; 4588 } 4589 4590 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) { 4591 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) { 4592 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when); 4593 return false; 4594 } 4595 4596 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) { 4597 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what); 4598 return false; 4599 } 4600 4601 if (native) { 4602 gDdmNhsgWhen = when; 4603 

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Suspend the VM, then post thread start notifications for all threads. Threads attaching
    // will see a suspension in progress and block until that ends. They then post their own
    // start notification.
    SuspendVM();
    std::list<Thread*> threads;
    Thread* self = Thread::Current();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      threads = Runtime::Current()->GetThreadList()->GetList();
    }
    {
      ScopedObjectAccess soa(self);
      for (Thread* thread : threads) {
        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      }
    }
    ResumeVM();
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  if (IsDebuggerActive()) {
    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
  }
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

JDWP::JdwpState* Dbg::GetJdwpState() {
  return gJdwpState;
}

int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return true;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return false;
  }

  gDdmHpifWhen = when;
  return true;
}

bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
    return false;
  }

  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
    return false;
  }

  if (native) {
    gDdmNhsgWhen = when;
    gDdmNhsgWhat = what;
  } else {
    gDdmHpsgWhen = when;
    gDdmHpsgWhat = what;
  }
  return true;
}

void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   *   [u4]: number of heaps
   *
   *   For each heap:
   *     [u4]: heap ID
   *     [u8]: timestamp in ms since Unix epoch
   *     [u1]: capture reason (same as 'when' value from server)
   *     [u4]: max heap size in bytes (-Xmx)
   *     [u4]: current heap size in bytes
   *     [u4]: current number of bytes allocated
   *     [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  JDWP::Append4BE(bytes, heap_count);
  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  JDWP::Append8BE(bytes, MilliTime());
  JDWP::Append1BE(bytes, reason);
  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
                                                             ArrayRef<const uint8_t>(bytes));
}
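
// Spot check of the HPIF layout above: with a single heap the payload is
// 4 (heap count) + 4 + 8 + 1 + 4 + 4 + 4 + 4 = 33 bytes, which is exactly what
// the CHECK_EQ above computes for heap_count == 1.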

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1 << 7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
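
// A state byte therefore packs the kind into bits [5:3] and the solidity into
// bits [2:0], with bit 7 reserved for HPSG_PARTIAL continuation records. A few
// illustrative spot checks (compile-time only, no runtime cost):
static_assert(HPSG_STATE(SOLIDITY_FREE, 0) == 0x00, "free chunk encodes as 0");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE) == 0x39, "hard native object");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == 0x21, "hard array of 4-byte components");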

class HeapChunkContext {
 public:
  // Maximum chunk size. Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // Virtual address of segment start.
    JDWP::Write4BE(&p_, 0);  // Offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units.
    // We won't know this until we're done, so save the offset and stuff in a dummy value.
    pieceLenField_ = p_;
    JDWP::Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }
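
  // Note: the header written above is 4 + 1 + 4 + 4 + 4 = 17 bytes, which is
  // why AppendChunk() below budgets "17 bytes for any header" when computing
  // the space needed before writing a record.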

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);

    ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
    Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
    Reset();
  }

  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the object is not an empty chunk.
  bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap they are crawling. The code below takes
    // care not to allocate memory by ensuring buf_ is already of the correct size.
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in-use memory so that free region information
      // also includes dlmalloc bookkeeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Free native memory of more than kMaxFreeLen is likely
      // the result of mmap use, so don't report it. If this is not free memory, start a new
      // segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need two bytes for every (possibly partial) run of 256 allocation units used by the
    // chunk, plus 17 bytes for any header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t bytes_left = &buf_.back() - p_;
    if (bytes_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking native heap.
        return;
      }
      Flush();
    }

    bytes_left = &buf_.back() - p_;
    if (bytes_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
                   << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;  // Convert to allocation units.
    totalAllocationUnits_ += length;
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;  // length - 1, i.e. 256 allocation units.
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }
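
  // Worked example of the encoding above: a hypothetical chunk of 600 allocation
  // units is emitted as three records: (state | HPSG_PARTIAL, 255) covering 256
  // units, (state | HPSG_PARTIAL, 255) covering another 256, and (state, 87)
  // covering the remaining 88, since each length byte stores "units - 1".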

  uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(mirror::Object* o)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    mirror::Class* c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c)) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
        case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
        case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
        case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
        case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;
  uint8_t* p_;
  uint8_t* pieceLenField_;
  void* startOfNextMemoryChunk_;
  size_t totalAllocationUnits_;
  uint32_t type_;
  bool needHeader_;
  size_t chunk_overhead_;

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
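
// Sends the heap segment chunks expected by DDMS: a heap-start chunk (HPST, or
// NHST for the native heap), a series of segment chunks (HPSG or HPSO depending
// on whether objects are merged, or NHSG for native), and finally a heap-end
// chunk (HPEN/NHEN). The segment chunk type is chosen by the HeapChunkContext
// constructor above; the start/end chunks are published directly below.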
void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                      ArrayRef<const uint8_t>(heap_id));
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  auto bump_pointer_space_visitor = [&](mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
    HeapChunkContext::HeapChunkJavaCallback(
        obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
  };
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for
        // an allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access
        // since RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap
        // lock.
        ScopedThreadSuspension sts(self, kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects; these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
                      ArrayRef<const uint8_t>(heap_id));
}

void Dbg::SetAllocTrackingEnabled(bool enable) {
  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }
  gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
  CHECK(records != nullptr);

  const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (count=" << count << ")";
  for (auto it = records->RBegin(), end = records->REnd();
       count > 0 && it != end; count--, it++) {
    const gc::AllocRecord* record = &it->second;

    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
              << mirror::Class::PrettyClass(record->GetClass());

    for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
      const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
      ArtMethod* m = stack_element.GetMethod();
      LOG(INFO) << "    " << ArtMethod::PrettyMethod(m) << " line "
                << stack_element.ComputeLineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }
  }
}

class StringTable {
 private:
  struct Entry {
    explicit Entry(const char* data_in)
        : data(data_in), hash(ComputeModifiedUtf8Hash(data_in)), index(0) {
    }
    Entry(const Entry& entry) = default;
    Entry(Entry&& entry) = default;

    // Pointer to the actual string data.
    const char* data;

    // The hash of the data.
    const uint32_t hash;

    // The index. This will be filled in on Finish and is not part of the ordering, so mark it
    // mutable.
    mutable uint32_t index;

    bool operator==(const Entry& other) const {
      return strcmp(data, other.data) == 0;
    }
  };
  struct EntryHash {
    size_t operator()(const Entry& entry) const {
      return entry.hash;
    }
  };

 public:
  StringTable() : finished_(false) {
  }

  void Add(const char* str, bool copy_string) {
    DCHECK(!finished_);
    if (UNLIKELY(copy_string)) {
      // Check whether it's already there.
      Entry entry(str);
      if (table_.find(entry) != table_.end()) {
        return;
      }

      // Make a copy.
      size_t str_len = strlen(str);
      char* copy = new char[str_len + 1];
      strlcpy(copy, str, str_len + 1);
      string_backup_.emplace_back(copy);
      str = copy;
    }
    Entry entry(str);
    table_.insert(entry);
  }

  // Update all entries and give them an index. Note that this is likely not the insertion order,
  // as the set is likely to reorder elements. Thus, Add must not be called after Finish, and
  // Finish must be called before IndexOf. Under that contract, WriteTo will walk in the same
  // order as Finish, and indices will agree. The order invariant, as well as the indices, are
  // enforced through debug checks.
  void Finish() {
    DCHECK(!finished_);
    finished_ = true;
    uint32_t index = 0;
    for (auto& entry : table_) {
      entry.index = index;
      ++index;
    }
  }

  size_t IndexOf(const char* s) const {
    DCHECK(finished_);
    Entry entry(s);
    auto it = table_.find(entry);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return it->index;
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    DCHECK(finished_);
    uint32_t cur_index = 0;
    for (const auto& entry : table_) {
      DCHECK_EQ(cur_index++, entry.index);

      size_t s_len = CountModifiedUtf8Chars(entry.data);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), entry.data);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::unordered_set<Entry, EntryHash> table_;
  std::vector<std::unique_ptr<char[]>> string_backup_;

  bool finished_;

  DISALLOW_COPY_AND_ASSIGN(StringTable);
};
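
// Typical use, as in Dbg::GetRecentAllocations() below: Add() all strings (each
// pointer must outlive the table unless copy_string is true), then Finish() to
// assign indices, then IndexOf() while serializing the entries, and finally
// WriteTo() to emit the strings in index order.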

static const char* GetMethodSourceFile(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 *   (1b) message header len (to allow future expansion); includes itself
 *   (1b) entry header len
 *   (1b) stack frame len
 *   (2b) number of entries
 *   (4b) offset to string table from start of message
 *   (2b) number of class name strings
 *   (2b) number of method name strings
 *   (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables. In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum. This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small. There will generally be no overlap
 * between the contents of these tables.
 */
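
/*
 * From the layout above, the fixed-size headers work out to:
 *   message header: 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes
 *   entry header:   4 + 2 + 2 + 1 = 9 bytes
 *   stack frame:    2 + 2 + 2 + 2 = 8 bytes
 * matching kMessageHeaderLen, kEntryHeaderLen, and kStackFrameLen below.
 */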
jbyteArray Dbg::GetRecentAllocations() {
  if ((false)) {
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
    // In case this method is called when the allocation tracker is disabled,
    // we should still send some data back.
    gc::AllocRecordObjectMap dummy;
    if (records == nullptr) {
      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
      records = &dummy;
    }
    // We don't need to wait on the condition variable records->new_record_condition_, because
    // this function only reads the class objects, which are already marked so it doesn't change
    // their reachability.

    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    VLOG(jdwp) << "Collecting StringTables.";

    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
    uint16_t count = capped_count;
    size_t alloc_byte_count = 0;
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      const gc::AllocRecord* record = &it->second;
      std::string temp;
      const char* class_descr = record->GetClassDescriptor(&temp);
      class_names.Add(class_descr, !temp.empty());

      // Size + tid + class name index + stack depth.
      alloc_byte_count += 4u + 2u + 2u + 1u;

      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
        ArtMethod* m = record->StackElement(i).GetMethod();
        class_names.Add(m->GetDeclaringClassDescriptor(), false);
        method_names.Add(m->GetName(), false);
        filenames.Add(GetMethodSourceFile(m), false);
      }

      // Depth * (class index + method name index + file name index + line number).
      alloc_byte_count += record->GetDepth() * (2u + 2u + 2u + 2u);
    }

    class_names.Finish();
    method_names.Finish();
    filenames.Finish();
    VLOG(jdwp) << "Done collecting StringTables:" << std::endl
               << "  ClassNames: " << class_names.Size() << std::endl
               << "  MethodNames: " << method_names.Size() << std::endl
               << "  Filenames: " << filenames.Size();

    LOG(INFO) << "recent allocation records: " << capped_count;
    LOG(INFO) << "allocation records all objects: " << records->Size();

    //
    // Part 2: generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    VLOG(jdwp) << "Dumping allocations with stacks";

    // Enlarge the vector for the allocation data.
    size_t reserve_size = bytes.size() + alloc_byte_count;
    bytes.reserve(reserve_size);

    std::string temp;
    count = capped_count;
    // The last "count" allocation records in "records" are the most recent "count" allocations.
    // Reverse-iterate to get them; the most recent allocation is sent first.
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      const gc::AllocRecord* record = &it->second;
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->GetClassDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
      }
    }

    CHECK_EQ(bytes.size(), reserve_size);
    VLOG(jdwp) << "Dumping tables.";

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);

    VLOG(jdwp) << "GetRecentAllocations: data created. " << bytes.size();
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

ArtMethod* DeoptimizationRequest::Method() const {
  return jni::DecodeArtMethod(method_);
}

void DeoptimizationRequest::SetMethod(ArtMethod* m) {
  method_ = jni::EncodeArtMethod(m);
}

void Dbg::VisitRoots(RootVisitor* visitor) {
  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
  for (Breakpoint& breakpoint : gBreakpoints) {
    breakpoint.Method()->VisitRoots(root_visitor, kRuntimePointerSize);
  }
}

void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) {
  Dbg::PostThreadStart(self);
}

void Dbg::DbgThreadLifecycleCallback::ThreadDeath(Thread* self) {
  Dbg::PostThreadDeath(self);
}

void Dbg::DbgClassLoadCallback::ClassLoad(Handle<mirror::Class> klass ATTRIBUTE_UNUSED) {
  // Ignore ClassLoad; only ClassPrepare is reported.
}

void Dbg::DbgClassLoadCallback::ClassPrepare(Handle<mirror::Class> temp_klass ATTRIBUTE_UNUSED,
                                             Handle<mirror::Class> klass) {
  Dbg::PostClassPrepare(klass.Get());
}

}  // namespace art