/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace.h"

#include <sys/uio.h>

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"
#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

// File format:
//     header
//     record 0
//     record 1
//     ...
//
// Header format:
//     u4  magic ('SLOW')
//     u2  version
//     u2  offset to data
//     u8  start date/time in usec
//     u2  record size in bytes (version >= 2 only)
//     ... padding to 32 bytes
//
// Record format v1:
//     u1  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v2:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v3:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//     u4  wall time since start, in usec (when clock == "dual" only)
//
// 32 bits of microseconds is 70 minutes.
//
// All values are stored in little-endian order.

enum TraceAction {
    kTraceMethodEnter = 0x00,       // method entry
    kTraceMethodExit = 0x01,        // method exit
    kTraceUnroll = 0x02,            // method exited by exception unrolling
    // 0x03 currently unused
    kTraceMethodActionMask = 0x03,  // two bits
};

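// Worked example of the method ID | action encoding (illustrative address; it
// relies on ArtMethod* being at least 4-byte aligned, which is what leaves the
// low two bits free for the action): a method at 0x12345678 that exits via
// exception unwinding is encoded as
//     0x12345678 | kTraceUnroll == 0x1234567a
// The decoding helpers further down in this file recover both halves:
// DecodeTraceMethodId() masks off the low two bits to get the pointer back,
// and DecodeTraceAction() keeps only those two bits.
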
class BuildStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL),
      method_trace_(Trace::AllocStackTrace()) {}

  bool VisitFrame() {
    mirror::ArtMethod* m = GetMethod();
    // Ignore runtime frames (in particular callee save).
    if (!m->IsRuntimeMethod()) {
      method_trace_->push_back(m);
    }
    return true;
  }

  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
  std::vector<mirror::ArtMethod*>* GetStackTrace() const {
    return method_trace_;
  }

 private:
  std::vector<mirror::ArtMethod*>* const method_trace_;
};

static const char     kTraceTokenChar             = '*';
static const uint16_t kTraceHeaderLength          = 32;
static const uint32_t kTraceMagicValue            = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock    = 2;
static const uint16_t kTraceVersionDualClock      = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock   = 14;  // using v3 with two timestamps

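// For reference, the record sizes above follow directly from the record
// formats documented at the top of this file:
//     v2 (single clock): u2 thread ID + u4 method ID/action + u4 clock delta      = 10 bytes
//     v3 (dual clock):   u2 thread ID + u4 method ID/action + two u4 clock deltas = 14 bytes
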
#if defined(HAVE_POSIX_CLOCKS)
ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceDual;
#else
ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceWall;
#endif

Trace* volatile Trace::the_trace_ = NULL;
pthread_t Trace::sampling_pthread_ = 0U;
UniquePtr<std::vector<mirror::ArtMethod*> > Trace::temp_stack_trace_;

static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
  return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
}

static TraceAction DecodeTraceAction(uint32_t tmid) {
  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}

static uint32_t EncodeTraceMethodAndAction(const mirror::ArtMethod* method,
                                           TraceAction action) {
  uint32_t tmid = reinterpret_cast<uint32_t>(method) | action;
  DCHECK_EQ(method, DecodeTraceMethodId(tmid));
  return tmid;
}

std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() {
  if (temp_stack_trace_.get() != NULL) {
    return temp_stack_trace_.release();
  } else {
    return new std::vector<mirror::ArtMethod*>();
  }
}

void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) {
  stack_trace->clear();
  temp_stack_trace_.reset(stack_trace);
}

void Trace::SetDefaultClockSource(ProfilerClockSource clock_source) {
#if defined(HAVE_POSIX_CLOCKS)
  default_clock_source_ = clock_source;
#else
  if (clock_source != kProfilerClockSourceWall) {
    LOG(WARNING) << "Ignoring tracing request to use CPU time.";
  }
#endif
}

static uint16_t GetTraceVersion(ProfilerClockSource clock_source) {
  return (clock_source == kProfilerClockSourceDual) ? kTraceVersionDualClock
                                                    : kTraceVersionSingleClock;
}

static uint16_t GetRecordSize(ProfilerClockSource clock_source) {
  return (clock_source == kProfilerClockSourceDual) ? kTraceRecordSizeDualClock
                                                    : kTraceRecordSizeSingleClock;
}

bool Trace::UseThreadCpuClock() {
  return (clock_source_ == kProfilerClockSourceThreadCpu) ||
      (clock_source_ == kProfilerClockSourceDual);
}

bool Trace::UseWallClock() {
  return (clock_source_ == kProfilerClockSourceWall) ||
      (clock_source_ == kProfilerClockSourceDual);
}

static void MeasureClockOverhead(Trace* trace) {
  if (trace->UseThreadCpuClock()) {
    Thread::Current()->GetCpuMicroTime();
  }
  if (trace->UseWallClock()) {
    MicroTime();
  }
}

// Compute an average time taken to measure clocks.
static uint32_t GetClockOverheadNanoSeconds(Trace* trace) {
  Thread* self = Thread::Current();
  uint64_t start = self->GetCpuMicroTime();

  for (int i = 4000; i > 0; i--) {
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
    MeasureClockOverhead(trace);
  }

  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
  // 4000 loop iterations x 8 calls each = 32000 measurements, so the average
  // cost per call in nanoseconds is elapsed_us * 1000 / 32000 = elapsed_us / 32.
  return static_cast<uint32_t>(elapsed_us / 32);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append2LE(uint8_t* buf, uint16_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
  *buf++ = static_cast<uint8_t>(val >> 32);
  *buf++ = static_cast<uint8_t>(val >> 40);
  *buf++ = static_cast<uint8_t>(val >> 48);
  *buf++ = static_cast<uint8_t>(val >> 56);
}

static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BuildStackTraceVisitor build_trace_visitor(thread);
  build_trace_visitor.WalkStack();
  std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
  Trace* the_trace = reinterpret_cast<Trace*>(arg);
  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}

static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
  thread->SetTraceClockBase(0);
  std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
  thread->SetStackTraceSample(NULL);
  delete stack_trace;
}

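// Worked example of the diffing done by CompareAndUpdateStackTrace() below
// (method names are hypothetical): if the previous sample for a thread was
// [main, run, foo] (outermost first) and the new sample is [main, run, bar],
// the common prefix is [main, run], so the sampler emits an exit event for foo
// followed by an entry event for bar, as if the thread had returned from foo
// and then called bar between the two samples.
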
void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<mirror::ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
  // Update the thread's stack trace sample.
  thread->SetStackTraceSample(stack_trace);
  // Read timer clocks to use for all events in this trace.
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  if (old_stack_trace == NULL) {
    // If there's no previous stack trace sample for this thread, log an entry event for all
    // methods in the trace.
    for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
         rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
    std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
    std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
    for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin();
         old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
                          thread_clock_diff, wall_clock_diff);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
    FreeStackTrace(old_stack_trace);
  }
}

void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  int interval_us = reinterpret_cast<int>(arg);
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  while (true) {
    usleep(interval_us);
    ATRACE_BEGIN("Profile sampling");
    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == NULL) {
        break;
      }
    }

    runtime->GetThreadList()->SuspendAll();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
    runtime->GetThreadList()->ResumeAll();
    ATRACE_END();
  }

  runtime->DetachCurrentThread();
  return NULL;
}

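// Illustrative invocations of Trace::Start() below (values are hypothetical;
// the real call sites live in the runtime's DDMS and profiling plumbing, not
// in this file):
//     // Exact method tracing into a file, 8 MiB buffer:
//     Trace::Start("/sdcard/test.trace", -1, 8 * 1024 * 1024, 0, false, false, 0);
//     // Sampling every 10 ms into an already-open file descriptor:
//     Trace::Start("", fd, 8 * 1024 * 1024, 0, false, true, 10000);
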
void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
                  bool direct_to_ddms, bool sampling_enabled, int interval_us) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
      return;
    }
  }
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();

  // Open trace file if not going directly to ddms.
  UniquePtr<File> trace_file;
  if (!direct_to_ddms) {
    if (trace_fd < 0) {
      trace_file.reset(OS::CreateEmptyFile(trace_filename));
    } else {
      trace_file.reset(new File(trace_fd, "tracefile"));
      trace_file->DisableAutoClose();
    }
    if (trace_file.get() == NULL) {
      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
      runtime->GetThreadList()->ResumeAll();
      ScopedObjectAccess soa(self);
      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
      return;
    }
  }

  // Create Trace object.
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
    } else {
      the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled);

      // Enable count of allocs if specified in the flags.
      if ((flags & kTraceCountAllocs) != 0) {
        runtime->SetStatsEnabled(true);
      }

      if (sampling_enabled) {
        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
                                            reinterpret_cast<void*>(interval_us)),
                           "Sampling profiler thread");
      } else {
        runtime->GetInstrumentation()->AddListener(the_trace_,
                                                   instrumentation::Instrumentation::kMethodEntered |
                                                   instrumentation::Instrumentation::kMethodExited |
                                                   instrumentation::Instrumentation::kMethodUnwind);
      }
    }
  }
  runtime->GetThreadList()->ResumeAll();
}

void Trace::Stop() {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Trace* the_trace = NULL;
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(Thread::Current(), *Locks::trace_lock_);
    if (the_trace_ == NULL) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = NULL;
      sampling_pthread = sampling_pthread_;
      sampling_pthread_ = 0U;
    }
  }
  if (the_trace != NULL) {
    the_trace->FinishTracing();

    if (the_trace->sampling_enabled_) {
      MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL);
    } else {
      runtime->GetInstrumentation()->RemoveListener(the_trace,
                                                    instrumentation::Instrumentation::kMethodEntered |
                                                    instrumentation::Instrumentation::kMethodExited |
                                                    instrumentation::Instrumentation::kMethodUnwind);
    }
    delete the_trace;
  }
  runtime->GetThreadList()->ResumeAll();

  if (sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
  }
}

void Trace::Shutdown() {
  if (GetMethodTracingMode() != kTracingInactive) {
    Stop();
  }
}

TracingMode Trace::GetMethodTracingMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  if (the_trace_ == NULL) {
    return kTracingInactive;
  } else if (the_trace_->sampling_enabled_) {
    return kSampleProfilingActive;
  } else {
    return kMethodTracingActive;
  }
}

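// Header bytes produced by the constructor below for a dual-clock (v3) trace,
// using an illustrative start time of 0x23456789ab usec (all fields are
// little-endian):
//     offset  0: 53 4c 4f 57              magic 'SLOW'
//     offset  4: 03 00                    version 3
//     offset  6: 20 00                    offset to data (kTraceHeaderLength == 32)
//     offset  8: ab 89 67 45 23 00 00 00  start date/time in usec
//     offset 16: 0e 00                    record size (kTraceRecordSizeDualClock == 14)
//     offset 18: 00 00 ... 00             zero padding out to 32 bytes
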
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
    : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
      sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
      buffer_size_(buffer_size), start_time_(MicroTime()), cur_offset_(0), overflow_(false) {
  // Set up the beginning of the trace.
  uint16_t trace_version = GetTraceVersion(clock_source_);
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }

  // Update current offset.
  cur_offset_ = kTraceHeaderLength;
}

static void DumpBuf(uint8_t* buf, size_t buf_size, ProfilerClockSource clock_source)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint8_t* ptr = buf + kTraceHeaderLength;
  uint8_t* end = buf + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    TraceAction action = DecodeTraceAction(tmid);
    LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
    ptr += GetRecordSize(clock_source);
  }
}

void Trace::FinishTracing() {
  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  size_t final_offset = cur_offset_;
  uint32_t clock_overhead_ns = GetClockOverheadNanoSeconds(this);

  if ((flags_ & kTraceCountAllocs) != 0) {
    Runtime::Current()->SetStatsEnabled(false);
  }

  std::set<mirror::ArtMethod*> visited_methods;
  GetVisitedMethods(final_offset, &visited_methods);

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
  if (UseThreadCpuClock()) {
    if (UseWallClock()) {
      os << StringPrintf("clock=dual\n");
    } else {
      os << StringPrintf("clock=thread-cpu\n");
    }
  } else {
    os << StringPrintf("clock=wall\n");
  }
  os << StringPrintf("elapsed-time-usec=%llu\n", elapsed);
  size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
  os << StringPrintf("num-method-calls=%zd\n", num_records);
  os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns);
  os << StringPrintf("vm=art\n");
  if ((flags_ & kTraceCountAllocs) != 0) {
    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
  }
  os << StringPrintf("%cthreads\n", kTraceTokenChar);
  DumpThreadList(os);
  os << StringPrintf("%cmethods\n", kTraceTokenChar);
  DumpMethodList(os, visited_methods);
  os << StringPrintf("%cend\n", kTraceTokenChar);

  std::string header(os.str());
  if (trace_file_.get() == NULL) {
    iovec iov[2];
    iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
    iov[0].iov_len = header.length();
    iov[1].iov_base = buf_.get();
    iov[1].iov_len = final_offset;
    Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
    const bool kDumpTraceInfo = false;
    if (kDumpTraceInfo) {
      LOG(INFO) << "Trace sent:\n" << header;
      DumpBuf(buf_.get(), final_offset, clock_source_);
    }
  } else {
    if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
        !trace_file_->WriteFully(buf_.get(), final_offset)) {
      std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
      PLOG(ERROR) << detail;
      ThrowRuntimeException("%s", detail.c_str());
    }
  }
}

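// Sample of the metadata block emitted by FinishTracing() above for a
// dual-clock run (values are illustrative):
//     *version
//     3
//     data-file-overflow=false
//     clock=dual
//     elapsed-time-usec=5000000
//     num-method-calls=1500
//     clock-call-overhead-nsec=600
//     vm=art
//     *threads
//     <tid>\t<thread name> lines from DumpThreadList()
//     *methods
//     <method id>\t<class>\t<name>\t<signature>\t<source file> lines from DumpMethodList()
//     *end
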
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
                       const mirror::ArtMethod* method, uint32_t new_dex_pc) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}

void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
                          const mirror::ArtMethod* method, uint32_t dex_pc) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
                         const mirror::ArtMethod* method, uint32_t dex_pc,
                         const JValue& return_value) {
  UNUSED(return_value);
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodUnwind(Thread* thread, const mirror::ArtMethod* method, uint32_t dex_pc) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                            mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                            mirror::Throwable* exception_object)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected exception caught event in tracing";
}

void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
  if (UseThreadCpuClock()) {
    uint64_t clock_base = thread->GetTraceClockBase();
    if (UNLIKELY(clock_base == 0)) {
      // First event, record the base time for this thread.
      uint64_t time = thread->GetCpuMicroTime();
      thread->SetTraceClockBase(time);
    } else {
      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
    }
  }
  if (UseWallClock()) {
    *wall_clock_diff = MicroTime() - start_time_;
  }
}

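// A single dual-clock record as laid out by LogMethodTraceEvent() below, with
// illustrative values (thread id 42, tmid 0x1234567a, thread-cpu delta 100
// usec, wall delta 250 usec; all little-endian):
//     2a 00  7a 56 34 12  64 00 00 00  fa 00 00 00
//     [tid]  [tmid      ] [thread cpu] [wall clock]
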
void Trace::LogMethodTraceEvent(Thread* thread, const mirror::ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event,
                                uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
  // Advance cur_offset_ atomically.
  int32_t new_offset;
  int32_t old_offset;
  do {
    old_offset = cur_offset_;
    new_offset = old_offset + GetRecordSize(clock_source_);
    if (new_offset > buffer_size_) {
      overflow_ = true;
      return;
    }
  } while (android_atomic_release_cas(old_offset, new_offset, &cur_offset_) != 0);

  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data
  uint8_t* ptr = buf_.get() + old_offset;
  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    Append4LE(ptr, wall_clock_diff);
  }
}

void Trace::GetVisitedMethods(size_t buf_size,
                              std::set<mirror::ArtMethod*>* visited_methods) {
  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
  uint8_t* end = buf_.get() + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    visited_methods->insert(method);
    ptr += GetRecordSize(clock_source_);
  }
}

void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
  MethodHelper mh;
  for (const auto& method : visited_methods) {
    mh.ChangeMethod(method);
    os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
                       PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
                       mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile());
  }
}

static void DumpThread(Thread* t, void* arg) {
  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
  std::string name;
  t->GetThreadName(name);
  os << t->GetTid() << "\t" << name << "\n";
}

void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}

}  // namespace art