/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "verify_object.h"

namespace art {

using android::base::StringPrintf;

static constexpr bool kDebugStackWalk = false;

mirror::Object* ShadowFrame::GetThisObject() const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    return GetVRegReference(0);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    CHECK(code_item != nullptr) << ArtMethod::PrettyMethod(m);
    uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
    return GetVRegReference(reg);
  }
}

mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else {
    return GetVRegReference(NumberOfVRegs() - num_ins);
  }
}

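// Note on the dex register layout assumed by the GetThisObject() overloads above
// (an illustrative example, not taken from this file): argument registers occupy
// the highest-numbered vregs of a frame. For an instance method with
// registers_size_ == 5 and ins_size_ == 2 (the receiver plus one argument),
// v0..v2 are locals and the receiver lands in v3 == registers_size_ - ins_size_,
// which is exactly the register read above.
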
size_t ManagedStack::NumJniShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->GetMethod()->IsNative()) {
        // The JNI ShadowFrame only contains references. (For indirect references.)
        count += current_frame->NumberOfVRegs();
      }
    }
  }
  return count;
}

bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           bool check_suspended)
    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames,
                           bool check_suspended)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      current_inlining_depth_(0),
      context_(context),
      check_suspended_(check_suspended) {
  if (check_suspended_) {
    DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
  }
}

InlineInfo StackVisitor::GetCurrentInlineInfo() const {
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
  CodeInfoEncoding encoding = code_info.ExtractEncoding();
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  return code_info.GetInlineInfoOf(stack_map, encoding);
}

ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      InlineInfo inline_info = GetCurrentInlineInfo();
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
      MethodInfo method_info = method_header->GetOptimizedMethodInfo();
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      return GetResolvedMethod(*GetCurrentQuickFrame(),
                               method_info,
                               inline_info,
                               encoding.inline_info.encoding,
                               depth_in_stack_map);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

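// Note: current_inlining_depth_ counts the inlined frames still to be visited at
// the current native PC; 0 means the walk is at the physical (outermost) frame.
// That is why GetMethod() above and GetDexPc() below index the inline info with
// current_inlining_depth_ - 1 when IsInInlinedFrame() holds.
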
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
      return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info.encoding,
                                                    depth_in_stack_map);
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return DexFile::kDexNoIndex;
    } else {
      return cur_oat_quick_method_header_->ToDexPc(
          GetMethod(), cur_quick_frame_pc_, abort_on_failure);
    }
  } else {
    return 0;
  }
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    REQUIRES_SHARED(Locks::mutator_lock_);

mirror::Object* StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs = reinterpret_cast<HandleScope*>(
          reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    if (code_item == nullptr) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
          << ArtMethod::PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
      uint32_t value = 0;
      bool success = GetVReg(m, reg, kReferenceVReg, &value);
      // We currently always guarantee the `this` object is live throughout the method.
      CHECK(success) << "Failed to read the this object in " << ArtMethod::PrettyMethod(m);
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

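// Note: a debugger can pin new values for individual vregs of a compiled frame
// into a dedicated shadow frame (see thread_->FindDebuggerShadowFrame above).
// The per-frame updated_vreg_flags array records which vregs were overwritten,
// so the reads below prefer the debugger-set value over the value reconstructed
// from optimized code.
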
bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegFromOptimizedCode(m, vreg, kind, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  DCHECK_EQ(m, GetMethod());
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  DCHECK(code_item != nullptr) << m->PrettyMethod();  // Can't be null or how would we compile
                                                      // its instructions?
  uint16_t number_of_dex_registers = code_item->registers_size_;
  DCHECK_LT(vreg, code_item->registers_size_);
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
  CodeInfoEncoding encoding = code_info.ExtractEncoding();

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  size_t depth_in_stack_map = current_inlining_depth_ - 1;

  DexRegisterMap dex_register_map = IsInInlinedFrame()
      ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
                                           code_info.GetInlineInfoOf(stack_map, encoding),
                                           encoding,
                                           number_of_dex_registers)
      : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);

  if (!dex_register_map.IsValid()) {
    return false;
  }
  DexRegisterLocation::Kind location_kind =
      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
                                                                    number_of_dex_registers,
                                                                    code_info,
                                                                    encoding);
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      uint32_t reg =
          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant:
      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL)
          << "Unexpected location kind "
          << dex_register_map.GetLocationInternalKind(vreg,
                                                      number_of_dex_registers,
                                                      code_info,
                                                      encoding);
      UNREACHABLE();
  }
}

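// Summary of the DexRegisterLocation kinds handled above: kInStack reads the
// value from a stack slot at a byte offset from the frame base, kInRegister*
// and kInFpuRegister* read it from a machine register via the thread context,
// kConstant materializes a compile-time constant, and kNone means the compiler
// recorded no location for the vreg at this PC, so the read fails.
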
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);

  if (kRuntimeISA == InstructionSet::kX86 && is_float) {
    // X86 float registers are 64-bit and each XMM register is provided as two separate
    // 32-bit registers by the context.
    reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
  }

  // On MIPS32, float registers are used as 64-bit values (on MIPS32r2 as the pair
  // F(2n)/F(2n+1), and on MIPS32r6 as the 64-bit register F(2n)). When accessing the
  // upper 32 bits of a double, reg + 1 must be used.
  if ((kRuntimeISA == InstructionSet::kMips) && (kind == kDoubleHiVReg)) {
    DCHECK_ALIGNED(reg, 2);
    reg++;
  }

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

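// Illustrative example of the register mapping above: on x86, reading the vreg
// pair of a double held in XMM0 uses kind kDoubleLoVReg for the low half and
// kDoubleHiVReg for the high half, which map to the context's 32-bit float
// registers 0 and 1 respectively; the pair readers below then reassemble the
// 64-bit value as (hi << 32) | lo.
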
bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

bool StackVisitor::SetVReg(ArtMethod* m,
                           uint16_t vreg,
                           uint32_t new_value,
                           VRegKind kind) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
  }
  if (kind == kReferenceVReg) {
    shadow_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(new_value));
  } else {
    shadow_frame->SetVReg(vreg, new_value);
  }
  return true;
}

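// Note: SetVReg() above and SetVRegPair() below write through a debugger shadow
// frame rather than patching the compiled frame in place. For a compiled frame,
// the new value only takes effect once the frame is deoptimized and the method
// resumes in the interpreter.
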
bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare for deoptimization (see SetVRegFromDebugger).
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg pair has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

uintptr_t StackVisitor::GetReturnPc() const {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  DCHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  CHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() OVERRIDE {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

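// NumFramesVisitor above is the simplest example of the visitation pattern used
// throughout this file: subclass StackVisitor, override VisitFrame() to inspect
// the current frame (GetMethod(), GetDexPc(), GetFrameId(), ...), return false
// from VisitFrame() to stop the walk early, and then call WalkStack().
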
bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += m->PrettyMethod();
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

void StackVisitor::SetMethod(ArtMethod* method) {
  DCHECK(GetMethod() != nullptr);
  if (cur_shadow_frame_ != nullptr) {
    cur_shadow_frame_->SetMethod(method);
  } else {
    DCHECK(cur_quick_frame_ != nullptr);
    CHECK(!IsInInlinedFrame()) << "We do not support setting an inlined method's ArtMethod!";
    *cur_quick_frame_ = method;
  }
}

static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
    return;
  }

  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
    return;
  }

  Runtime* runtime = Runtime::Current();
  if (runtime->UseJitCompilation() &&
      runtime->GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(pc))) {
    return;
  }

  const void* code = method->GetEntryPointFromQuickCompiledCode();
  if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
    return;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  if (class_linker->IsQuickToInterpreterBridge(code) ||
      class_linker->IsQuickResolutionStub(code)) {
    return;
  }

  if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
    return;
  }

  uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->GetCodeSize();
  uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
  CHECK(code_start <= pc && pc <= (code_start + code_size))
      << method->PrettyMethod()
      << " pc=" << std::hex << pc
      << " code_start=" << code_start
      << " code_size=" << code_size;
}

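// Note: AssertPcIsWithinQuickCode() above deliberately skips the PC range check
// whenever the frame cannot be attributed to a single compiled code blob:
// native, runtime and proxy methods, the instrumentation exit PC, JIT-compiled
// code, and the resolution, instrumentation-entry, obsolete-method and
// quick-to-interpreter bridge stubs.
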
void StackVisitor::SanityCheckFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    auto* declaring_class = method->GetDeclaringClass();
    // Runtime methods have null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    Runtime* const runtime = Runtime::Current();
    LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
    if (!linear_alloc->Contains(method)) {
      // Check class linker linear allocs.
      mirror::Class* klass = method->GetDeclaringClass();
      LinearAlloc* const class_linear_alloc = (klass != nullptr)
          ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
          : linear_alloc;
      if (!class_linear_alloc->Contains(method)) {
        // Check image space.
        bool in_image = false;
        for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
          if (space->IsImageSpace()) {
            auto* image_space = space->AsImageSpace();
            const auto& header = image_space->GetImageHeader();
            const ImageSection& methods = header.GetMethodsSection();
            const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
            const size_t offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
            if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
              in_image = true;
              break;
            }
          }
        }
        CHECK(in_image) << method->PrettyMethod() << " not in linear alloc or image";
      }
    }
    if (cur_quick_frame_ != nullptr) {
      AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
      // Frame sanity.
      size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // A rough guess at an upper size we expect to see for a frame.
      // 256 registers
      // 2 words HandleScope overhead
      // 3+3 register spills
      // TODO: this seems architecture specific for the case of JNI frames.
      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = 2 * KB;
      CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
      size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

// Counts the number of references in the parameter list of the corresponding method.
// Note: This does _not_ include the "this" argument for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint32_t shorty_len;
  const char* shorty = method->GetShorty(&shorty_len);
  uint32_t refs = 0;
  for (uint32_t i = 1; i < shorty_len; ++i) {
    if (shorty[i] == 'L') {
      refs++;
    }
  }
  return refs;
}

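// Illustrative example for the shorty scan above (a hypothetical method, not
// from this file): a method Object f(int, Object) has shorty "LIL". Index 0 is
// the return type and is skipped, so only the second 'L' counts and the
// function returns 1. The implicit receiver of instance methods never appears
// in the shorty, which is why the count excludes "this".
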
QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    return cur_oat_quick_method_header_->GetFrameInfo();
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method of a proxy class: the constructor. A direct method is
    // cloned from the original java.lang.reflect.Proxy and is executed as a usual quick
    // compiled method without any stubs. Therefore the method must have an OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have an OatQuickMethodHeader";
    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);
  }

  // The only remaining case is if the method is native and uses the generic JNI stub.
  DCHECK(method->IsNative());
  ClassLinker* class_linker = runtime->GetClassLinker();
  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
                                                                           kRuntimePointerSize);
  DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << method->PrettyMethod();
  // Generic JNI frame.
  uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
  size_t scope_size = HandleScope::SizeOf(handle_refs);
  QuickMethodFrameInfo callee_info =
      runtime->GetCalleeSaveMethodFrameInfo(Runtime::kSaveRefsAndArgs);

  // Callee saves + handle scope + method ref + alignment
  // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
  size_t frame_size = RoundUp(
      callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
      kStackAlignment);
  return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}

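// WalkStack() below drives all of the visitors in this file. In outline: for
// each ManagedStack fragment of the thread, it either (a) walks a chain of
// quick frames, using each method's frame size to step from one frame to its
// caller via the saved return PC, visiting any inlined frames before the
// physical frame that holds them, or (b) walks a chain of linked shadow
// frames; transitions between the two kinds are reported if requested.
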
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
  if (check_suspended_) {
    DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  }
  CHECK_EQ(cur_depth_, 0U);
  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
  uint32_t instrumentation_stack_depth = 0;
  size_t inlined_frames_count = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    cur_oat_quick_method_header_ = nullptr;

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      while (method != nullptr) {
        cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        SanityCheckFrame();

        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()) {
          CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
          CodeInfoEncoding encoding = code_info.ExtractEncoding();
          uint32_t native_pc_offset =
              cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
            DCHECK_EQ(current_inlining_depth_, 0u);
            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
                 current_inlining_depth_ != 0;
                 --current_inlining_depth_) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
              inlined_frames_count++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        size_t return_pc_offset = frame_size - sizeof(void*);
        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);

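        // If instrumentation exit stubs are installed, the stack slot read above
        // may hold GetQuickInstrumentationExitPc() instead of the real return PC,
        // which instrumentation moved to the thread-local instrumentation stack;
        // the block below restores it from there so the walk can continue past
        // the instrumented frame.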
        if (UNLIKELY(exit_stubs_installed)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
            CHECK_LT(instrumentation_stack_depth, thread_->GetInstrumentationStack()->size());
            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                thread_->GetInstrumentationStack()->at(instrumentation_stack_depth);
            instrumentation_stack_depth++;
            if (GetMethod() ==
                Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
              // Skip runtime save-all-callee-saves frames, which are used to deliver exceptions.
            } else if (instrumentation_frame.interpreter_entry_) {
              ArtMethod* callee =
                  Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs);
              CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
                                            << " Found: " << ArtMethod::PrettyMethod(GetMethod());
            } else {
              // Instrumentation generally doesn't distinguish between a method's obsolete and
              // non-obsolete version.
              CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
                       GetMethod()->GetNonObsoleteMethod())
                  << "Expected: "
                  << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
                  << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
            }
            if (num_frames_ != 0) {
              // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
              // recursion.
              size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
                  thread_,
                  cur_depth_,
                  inlined_frames_count);
              CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
            }
            return_pc = instrumentation_frame.return_pc_;
          }
        }

        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
              << std::boolalpha
              << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                   cur_oat_quick_method_header_->IsOptimized())
              << " native=" << method->IsNative()
              << std::noboolalpha
              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
              << "," << (method->IsNative() ? method->GetEntryPointFromJni() : nullptr)
              << " next=" << *cur_quick_frame_;
        }

        if (kCount == CountTransitions::kYes || !method->IsRuntimeMethod()) {
          cur_depth_++;
        }
        method = *cur_quick_frame_;
      }
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    if (kCount == CountTransitions::kYes) {
      cur_depth_++;
    }
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool);
template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool);

void JavaFrameRootInfo::Describe(std::ostream& os) const {
  const StackVisitor* visitor = stack_visitor_;
  CHECK(visitor != nullptr);
  os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
      visitor->DescribeLocation() << " vreg=" << vreg_;
}

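// Illustrative quick-frame layout implied by the offset arithmetic below (a
// sketch, assuming no special temporaries; higher addresses towards the top):
//
//   caller frame:  ins (vregs num_regs..registers_size_-1)
//   -------------  <- frame_size bytes above the callee's ArtMethod* slot
//   spill area     (core + fp spills plus one word of filler: spill_size)
//   locals         (vregs 0..num_regs-1, starting at locals_start)
//   temporaries    (starting at temps_start, placed relative to the outs)
//   outs
//   ArtMethod*     <- offset 0, the value returned for reg == temp_threshold
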
int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                             uint32_t core_spills, uint32_t fp_spills,
                                             size_t frame_size, int reg, InstructionSet isa) {
  PointerSize pointer_size = InstructionSetPointerSize(isa);
  if (kIsDebugBuild) {
    auto* runtime = Runtime::Current();
    if (runtime != nullptr) {
      CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
    }
  }
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_NE(reg, -1);
  int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
      + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
      + sizeof(uint32_t);  // Filler.
  int num_regs = code_item->registers_size_ - code_item->ins_size_;
  int temp_threshold = code_item->registers_size_;
  const int max_num_special_temps = 1;
  if (reg == temp_threshold) {
    // The current method pointer corresponds to a special location on the stack.
    return 0;
  } else if (reg >= temp_threshold + max_num_special_temps) {
    /*
     * Special temporaries may have custom locations and the logic above deals with that.
     * However, non-special temporaries are placed relative to the outs.
     */
    int temps_start = code_item->outs_size_ * sizeof(uint32_t)
        + static_cast<size_t>(pointer_size) /* art method */;
    int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
    return temps_start + relative_offset;
  } else if (reg < num_regs) {
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    return locals_start + (reg * sizeof(uint32_t));
  } else {
    // Handle ins.
    return frame_size + ((reg - num_regs) * sizeof(uint32_t))
        + static_cast<size_t>(pointer_size) /* art method */;
  }
}

void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
  if (obj == nullptr) {
    return;
  }

  // If there's an error during enter, we won't have locked the monitor. So check there's no
  // exception.
  if (self->IsExceptionPending()) {
    return;
  }

  if (monitors_ == nullptr) {
    monitors_.reset(new std::vector<mirror::Object*>());
  }
  monitors_->push_back(obj);
}

void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
  if (obj == nullptr) {
    return;
  }
  bool found_object = false;
  if (monitors_ != nullptr) {
    // We need to remove one entry for the object, as duplicates are used for counting
    // recursive locks. We arbitrarily choose the first one.
    auto it = std::find(monitors_->begin(), monitors_->end(), obj);
    if (it != monitors_->end()) {
      monitors_->erase(it);
      found_object = true;
    }
  }
  if (!found_object) {
    // The object wasn't found. Time for an IllegalMonitorStateException.
    // The order here isn't fully clear. Assume that any other pending exception is swallowed.
    // TODO: Maybe make the already pending exception a suppressed exception.
    self->ClearException();
    self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
                             "did not lock monitor on object of type '%s' before unlocking",
                             const_cast<mirror::Object*>(obj)->PrettyTypeOf().c_str());
  }
}

// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
// that the object was locked.
void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  obj->MonitorExit(self);
}

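// Note on the bookkeeping above and below: monitors_ intentionally stores
// duplicates to count recursive locks. For example, entering the same monitor o
// twice yields [o, o]; each RemoveMonitorOrThrow() erases one entry, and only
// an unlock without a matching entry raises IllegalMonitorStateException.
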
bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
  DCHECK(self != nullptr);
  if (monitors_ != nullptr) {
    if (!monitors_->empty()) {
      // There may be an exception pending, if the method is terminating abruptly. Clear it.
      // TODO: Should we add this as a suppressed exception?
      self->ClearException();

      // OK, there are monitors that are still locked. To enforce structured locking (and avoid
      // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception.
      for (mirror::Object* obj : *monitors_) {
        MonitorExitHelper(self, obj);
        // If this raised an exception, ignore. TODO: Should we add this as suppressed
        // exceptions?
        if (self->IsExceptionPending()) {
          self->ClearException();
        }
      }
      // Raise an exception, just give the first object as the sample.
      mirror::Object* first = (*monitors_)[0];
      self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
                               "did not unlock monitor on object of type '%s'",
                               mirror::Object::PrettyTypeOf(first).c_str());

      // To make sure this path is not triggered again, clean out the monitors.
      monitors_->clear();

      return false;
    }
  }
  return true;
}

}  // namespace art