1 /* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "trace.h" 18 19 #include <sys/uio.h> 20 21 #include "base/stl_util.h" 22 #include "base/unix_file/fd_file.h" 23 #include "class_linker.h" 24 #include "common_throws.h" 25 #include "debugger.h" 26 #include "dex_file-inl.h" 27 #include "instrumentation.h" 28 #include "mirror/art_method-inl.h" 29 #include "mirror/class-inl.h" 30 #include "mirror/dex_cache.h" 31 #include "mirror/object_array-inl.h" 32 #include "mirror/object-inl.h" 33 #include "os.h" 34 #include "scoped_thread_state_change.h" 35 #include "ScopedLocalRef.h" 36 #include "thread.h" 37 #include "thread_list.h" 38 #if !defined(ART_USE_PORTABLE_COMPILER) 39 #include "entrypoints/quick/quick_entrypoints.h" 40 #endif 41 42 namespace art { 43 44 // File format: 45 // header 46 // record 0 47 // record 1 48 // ... 49 // 50 // Header format: 51 // u4 magic ('SLOW') 52 // u2 version 53 // u2 offset to data 54 // u8 start date/time in usec 55 // u2 record size in bytes (version >= 2 only) 56 // ... padding to 32 bytes 57 // 58 // Record format v1: 59 // u1 thread ID 60 // u4 method ID | method action 61 // u4 time delta since start, in usec 62 // 63 // Record format v2: 64 // u2 thread ID 65 // u4 method ID | method action 66 // u4 time delta since start, in usec 67 // 68 // Record format v3: 69 // u2 thread ID 70 // u4 method ID | method action 71 // u4 time delta since start, in usec 72 // u4 wall time since start, in usec (when clock == "dual" only) 73 // 74 // 32 bits of microseconds is 70 minutes. 75 // 76 // All values are stored in little-endian order. 77 78 enum TraceAction { 79 kTraceMethodEnter = 0x00, // method entry 80 kTraceMethodExit = 0x01, // method exit 81 kTraceUnroll = 0x02, // method exited by exception unrolling 82 // 0x03 currently unused 83 kTraceMethodActionMask = 0x03, // two bits 84 }; 85 86 class BuildStackTraceVisitor : public StackVisitor { 87 public: 88 explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL), 89 method_trace_(Trace::AllocStackTrace()) {} 90 91 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 92 mirror::ArtMethod* m = GetMethod(); 93 // Ignore runtime frames (in particular callee save). 94 if (!m->IsRuntimeMethod()) { 95 method_trace_->push_back(m); 96 } 97 return true; 98 } 99 100 // Returns a stack trace where the topmost frame corresponds with the first element of the vector. 101 std::vector<mirror::ArtMethod*>* GetStackTrace() const { 102 return method_trace_; 103 } 104 105 private: 106 std::vector<mirror::ArtMethod*>* const method_trace_; 107 }; 108 109 static const char kTraceTokenChar = '*'; 110 static const uint16_t kTraceHeaderLength = 32; 111 static const uint32_t kTraceMagicValue = 0x574f4c53; 112 static const uint16_t kTraceVersionSingleClock = 2; 113 static const uint16_t kTraceVersionDualClock = 3; 114 static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2 115 static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps 116 117 TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource; 118 119 Trace* volatile Trace::the_trace_ = NULL; 120 pthread_t Trace::sampling_pthread_ = 0U; 121 std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_; 122 123 static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) { 124 return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask); 125 } 126 127 static TraceAction DecodeTraceAction(uint32_t tmid) { 128 return static_cast<TraceAction>(tmid & kTraceMethodActionMask); 129 } 130 131 static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method, 132 TraceAction action) { 133 uint32_t tmid = PointerToLowMemUInt32(method) | action; 134 DCHECK_EQ(method, DecodeTraceMethodId(tmid)); 135 return tmid; 136 } 137 138 std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() { 139 if (temp_stack_trace_.get() != NULL) { 140 return temp_stack_trace_.release(); 141 } else { 142 return new std::vector<mirror::ArtMethod*>(); 143 } 144 } 145 146 void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) { 147 stack_trace->clear(); 148 temp_stack_trace_.reset(stack_trace); 149 } 150 151 void Trace::SetDefaultClockSource(TraceClockSource clock_source) { 152 #if defined(HAVE_POSIX_CLOCKS) 153 default_clock_source_ = clock_source; 154 #else 155 if (clock_source != kTraceClockSourceWall) { 156 LOG(WARNING) << "Ignoring tracing request to use CPU time."; 157 } 158 #endif 159 } 160 161 static uint16_t GetTraceVersion(TraceClockSource clock_source) { 162 return (clock_source == kTraceClockSourceDual) ? kTraceVersionDualClock 163 : kTraceVersionSingleClock; 164 } 165 166 static uint16_t GetRecordSize(TraceClockSource clock_source) { 167 return (clock_source == kTraceClockSourceDual) ? kTraceRecordSizeDualClock 168 : kTraceRecordSizeSingleClock; 169 } 170 171 bool Trace::UseThreadCpuClock() { 172 return (clock_source_ == kTraceClockSourceThreadCpu) || 173 (clock_source_ == kTraceClockSourceDual); 174 } 175 176 bool Trace::UseWallClock() { 177 return (clock_source_ == kTraceClockSourceWall) || 178 (clock_source_ == kTraceClockSourceDual); 179 } 180 181 void Trace::MeasureClockOverhead() { 182 if (UseThreadCpuClock()) { 183 Thread::Current()->GetCpuMicroTime(); 184 } 185 if (UseWallClock()) { 186 MicroTime(); 187 } 188 } 189 190 // Compute an average time taken to measure clocks. 191 uint32_t Trace::GetClockOverheadNanoSeconds() { 192 Thread* self = Thread::Current(); 193 uint64_t start = self->GetCpuMicroTime(); 194 195 for (int i = 4000; i > 0; i--) { 196 MeasureClockOverhead(); 197 MeasureClockOverhead(); 198 MeasureClockOverhead(); 199 MeasureClockOverhead(); 200 MeasureClockOverhead(); 201 MeasureClockOverhead(); 202 MeasureClockOverhead(); 203 MeasureClockOverhead(); 204 } 205 206 uint64_t elapsed_us = self->GetCpuMicroTime() - start; 207 return static_cast<uint32_t>(elapsed_us / 32); 208 } 209 210 // TODO: put this somewhere with the big-endian equivalent used by JDWP. 211 static void Append2LE(uint8_t* buf, uint16_t val) { 212 *buf++ = static_cast<uint8_t>(val); 213 *buf++ = static_cast<uint8_t>(val >> 8); 214 } 215 216 // TODO: put this somewhere with the big-endian equivalent used by JDWP. 217 static void Append4LE(uint8_t* buf, uint32_t val) { 218 *buf++ = static_cast<uint8_t>(val); 219 *buf++ = static_cast<uint8_t>(val >> 8); 220 *buf++ = static_cast<uint8_t>(val >> 16); 221 *buf++ = static_cast<uint8_t>(val >> 24); 222 } 223 224 // TODO: put this somewhere with the big-endian equivalent used by JDWP. 225 static void Append8LE(uint8_t* buf, uint64_t val) { 226 *buf++ = static_cast<uint8_t>(val); 227 *buf++ = static_cast<uint8_t>(val >> 8); 228 *buf++ = static_cast<uint8_t>(val >> 16); 229 *buf++ = static_cast<uint8_t>(val >> 24); 230 *buf++ = static_cast<uint8_t>(val >> 32); 231 *buf++ = static_cast<uint8_t>(val >> 40); 232 *buf++ = static_cast<uint8_t>(val >> 48); 233 *buf++ = static_cast<uint8_t>(val >> 56); 234 } 235 236 static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 237 BuildStackTraceVisitor build_trace_visitor(thread); 238 build_trace_visitor.WalkStack(); 239 std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); 240 Trace* the_trace = reinterpret_cast<Trace*>(arg); 241 the_trace->CompareAndUpdateStackTrace(thread, stack_trace); 242 } 243 244 static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) { 245 thread->SetTraceClockBase(0); 246 std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample(); 247 thread->SetStackTraceSample(NULL); 248 delete stack_trace; 249 } 250 251 void Trace::CompareAndUpdateStackTrace(Thread* thread, 252 std::vector<mirror::ArtMethod*>* stack_trace) { 253 CHECK_EQ(pthread_self(), sampling_pthread_); 254 std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample(); 255 // Update the thread's stack trace sample. 256 thread->SetStackTraceSample(stack_trace); 257 // Read timer clocks to use for all events in this trace. 258 uint32_t thread_clock_diff = 0; 259 uint32_t wall_clock_diff = 0; 260 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 261 if (old_stack_trace == NULL) { 262 // If there's no previous stack trace sample for this thread, log an entry event for all 263 // methods in the trace. 264 for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 265 rit != stack_trace->rend(); ++rit) { 266 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 267 thread_clock_diff, wall_clock_diff); 268 } 269 } else { 270 // If there's a previous stack trace for this thread, diff the traces and emit entry and exit 271 // events accordingly. 272 std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin(); 273 std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 274 // Iterate bottom-up over both traces until there's a difference between them. 275 while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) { 276 old_rit++; 277 rit++; 278 } 279 // Iterate top-down over the old trace until the point where they differ, emitting exit events. 280 for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin(); 281 old_it != old_rit.base(); ++old_it) { 282 LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited, 283 thread_clock_diff, wall_clock_diff); 284 } 285 // Iterate bottom-up over the new trace from the point where they differ, emitting entry events. 286 for (; rit != stack_trace->rend(); ++rit) { 287 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 288 thread_clock_diff, wall_clock_diff); 289 } 290 FreeStackTrace(old_stack_trace); 291 } 292 } 293 294 void* Trace::RunSamplingThread(void* arg) { 295 Runtime* runtime = Runtime::Current(); 296 intptr_t interval_us = reinterpret_cast<intptr_t>(arg); 297 CHECK_GE(interval_us, 0); 298 CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(), 299 !runtime->IsCompiler())); 300 301 while (true) { 302 usleep(interval_us); 303 ATRACE_BEGIN("Profile sampling"); 304 Thread* self = Thread::Current(); 305 Trace* the_trace; 306 { 307 MutexLock mu(self, *Locks::trace_lock_); 308 the_trace = the_trace_; 309 if (the_trace == NULL) { 310 break; 311 } 312 } 313 314 runtime->GetThreadList()->SuspendAll(); 315 { 316 MutexLock mu(self, *Locks::thread_list_lock_); 317 runtime->GetThreadList()->ForEach(GetSample, the_trace); 318 } 319 runtime->GetThreadList()->ResumeAll(); 320 ATRACE_END(); 321 } 322 323 runtime->DetachCurrentThread(); 324 return NULL; 325 } 326 327 void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, 328 bool direct_to_ddms, bool sampling_enabled, int interval_us) { 329 Thread* self = Thread::Current(); 330 { 331 MutexLock mu(self, *Locks::trace_lock_); 332 if (the_trace_ != NULL) { 333 LOG(ERROR) << "Trace already in progress, ignoring this request"; 334 return; 335 } 336 } 337 338 // Check interval if sampling is enabled 339 if (sampling_enabled && interval_us <= 0) { 340 LOG(ERROR) << "Invalid sampling interval: " << interval_us; 341 ScopedObjectAccess soa(self); 342 ThrowRuntimeException("Invalid sampling interval: %d", interval_us); 343 return; 344 } 345 346 // Open trace file if not going directly to ddms. 347 std::unique_ptr<File> trace_file; 348 if (!direct_to_ddms) { 349 if (trace_fd < 0) { 350 trace_file.reset(OS::CreateEmptyFile(trace_filename)); 351 } else { 352 trace_file.reset(new File(trace_fd, "tracefile")); 353 trace_file->DisableAutoClose(); 354 } 355 if (trace_file.get() == NULL) { 356 PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; 357 ScopedObjectAccess soa(self); 358 ThrowRuntimeException("Unable to open trace file '%s'", trace_filename); 359 return; 360 } 361 } 362 363 Runtime* runtime = Runtime::Current(); 364 365 // Enable count of allocs if specified in the flags. 366 bool enable_stats = false; 367 368 runtime->GetThreadList()->SuspendAll(); 369 370 // Create Trace object. 371 { 372 MutexLock mu(self, *Locks::trace_lock_); 373 if (the_trace_ != NULL) { 374 LOG(ERROR) << "Trace already in progress, ignoring this request"; 375 } else { 376 enable_stats = (flags && kTraceCountAllocs) != 0; 377 the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled); 378 if (sampling_enabled) { 379 CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread, 380 reinterpret_cast<void*>(interval_us)), 381 "Sampling profiler thread"); 382 } else { 383 runtime->GetInstrumentation()->AddListener(the_trace_, 384 instrumentation::Instrumentation::kMethodEntered | 385 instrumentation::Instrumentation::kMethodExited | 386 instrumentation::Instrumentation::kMethodUnwind); 387 runtime->GetInstrumentation()->EnableMethodTracing(); 388 } 389 } 390 } 391 392 runtime->GetThreadList()->ResumeAll(); 393 394 // Can't call this when holding the mutator lock. 395 if (enable_stats) { 396 runtime->SetStatsEnabled(true); 397 } 398 } 399 400 void Trace::Stop() { 401 bool stop_alloc_counting = false; 402 Runtime* runtime = Runtime::Current(); 403 runtime->GetThreadList()->SuspendAll(); 404 Trace* the_trace = NULL; 405 pthread_t sampling_pthread = 0U; 406 { 407 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 408 if (the_trace_ == NULL) { 409 LOG(ERROR) << "Trace stop requested, but no trace currently running"; 410 } else { 411 the_trace = the_trace_; 412 the_trace_ = NULL; 413 sampling_pthread = sampling_pthread_; 414 } 415 } 416 if (the_trace != NULL) { 417 stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0; 418 the_trace->FinishTracing(); 419 420 if (the_trace->sampling_enabled_) { 421 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); 422 runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL); 423 } else { 424 runtime->GetInstrumentation()->DisableMethodTracing(); 425 runtime->GetInstrumentation()->RemoveListener(the_trace, 426 instrumentation::Instrumentation::kMethodEntered | 427 instrumentation::Instrumentation::kMethodExited | 428 instrumentation::Instrumentation::kMethodUnwind); 429 } 430 if (the_trace->trace_file_.get() != nullptr) { 431 // Do not try to erase, so flush and close explicitly. 432 if (the_trace->trace_file_->Flush() != 0) { 433 PLOG(ERROR) << "Could not flush trace file."; 434 } 435 if (the_trace->trace_file_->Close() != 0) { 436 PLOG(ERROR) << "Could not close trace file."; 437 } 438 } 439 delete the_trace; 440 } 441 runtime->GetThreadList()->ResumeAll(); 442 443 if (stop_alloc_counting) { 444 // Can be racy since SetStatsEnabled is not guarded by any locks. 445 Runtime::Current()->SetStatsEnabled(false); 446 } 447 448 if (sampling_pthread != 0U) { 449 CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown"); 450 sampling_pthread_ = 0U; 451 } 452 } 453 454 void Trace::Shutdown() { 455 if (GetMethodTracingMode() != kTracingInactive) { 456 Stop(); 457 } 458 } 459 460 TracingMode Trace::GetMethodTracingMode() { 461 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 462 if (the_trace_ == NULL) { 463 return kTracingInactive; 464 } else if (the_trace_->sampling_enabled_) { 465 return kSampleProfilingActive; 466 } else { 467 return kMethodTracingActive; 468 } 469 } 470 471 Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled) 472 : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags), 473 sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_), 474 buffer_size_(buffer_size), start_time_(MicroTime()), 475 clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) { 476 // Set up the beginning of the trace. 477 uint16_t trace_version = GetTraceVersion(clock_source_); 478 memset(buf_.get(), 0, kTraceHeaderLength); 479 Append4LE(buf_.get(), kTraceMagicValue); 480 Append2LE(buf_.get() + 4, trace_version); 481 Append2LE(buf_.get() + 6, kTraceHeaderLength); 482 Append8LE(buf_.get() + 8, start_time_); 483 if (trace_version >= kTraceVersionDualClock) { 484 uint16_t record_size = GetRecordSize(clock_source_); 485 Append2LE(buf_.get() + 16, record_size); 486 } 487 488 // Update current offset. 489 cur_offset_.StoreRelaxed(kTraceHeaderLength); 490 } 491 492 static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) 493 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 494 uint8_t* ptr = buf + kTraceHeaderLength; 495 uint8_t* end = buf + buf_size; 496 497 while (ptr < end) { 498 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 499 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 500 TraceAction action = DecodeTraceAction(tmid); 501 LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action); 502 ptr += GetRecordSize(clock_source); 503 } 504 } 505 506 void Trace::FinishTracing() { 507 // Compute elapsed time. 508 uint64_t elapsed = MicroTime() - start_time_; 509 510 size_t final_offset = cur_offset_.LoadRelaxed(); 511 512 std::set<mirror::ArtMethod*> visited_methods; 513 GetVisitedMethods(final_offset, &visited_methods); 514 515 std::ostringstream os; 516 517 os << StringPrintf("%cversion\n", kTraceTokenChar); 518 os << StringPrintf("%d\n", GetTraceVersion(clock_source_)); 519 os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false"); 520 if (UseThreadCpuClock()) { 521 if (UseWallClock()) { 522 os << StringPrintf("clock=dual\n"); 523 } else { 524 os << StringPrintf("clock=thread-cpu\n"); 525 } 526 } else { 527 os << StringPrintf("clock=wall\n"); 528 } 529 os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed); 530 size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_); 531 os << StringPrintf("num-method-calls=%zd\n", num_records); 532 os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_); 533 os << StringPrintf("vm=art\n"); 534 if ((flags_ & kTraceCountAllocs) != 0) { 535 os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS)); 536 os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES)); 537 os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS)); 538 } 539 os << StringPrintf("%cthreads\n", kTraceTokenChar); 540 DumpThreadList(os); 541 os << StringPrintf("%cmethods\n", kTraceTokenChar); 542 DumpMethodList(os, visited_methods); 543 os << StringPrintf("%cend\n", kTraceTokenChar); 544 545 std::string header(os.str()); 546 if (trace_file_.get() == NULL) { 547 iovec iov[2]; 548 iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str())); 549 iov[0].iov_len = header.length(); 550 iov[1].iov_base = buf_.get(); 551 iov[1].iov_len = final_offset; 552 Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2); 553 const bool kDumpTraceInfo = false; 554 if (kDumpTraceInfo) { 555 LOG(INFO) << "Trace sent:\n" << header; 556 DumpBuf(buf_.get(), final_offset, clock_source_); 557 } 558 } else { 559 if (!trace_file_->WriteFully(header.c_str(), header.length()) || 560 !trace_file_->WriteFully(buf_.get(), final_offset)) { 561 std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno))); 562 PLOG(ERROR) << detail; 563 ThrowRuntimeException("%s", detail.c_str()); 564 } 565 } 566 } 567 568 void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, 569 mirror::ArtMethod* method, uint32_t new_dex_pc) { 570 // We're not recorded to listen to this kind of event, so complain. 571 LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; 572 }; 573 574 void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object, 575 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field) 576 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 577 // We're not recorded to listen to this kind of event, so complain. 578 LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; 579 } 580 581 void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object, 582 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field, 583 const JValue& field_value) 584 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 585 // We're not recorded to listen to this kind of event, so complain. 586 LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc; 587 } 588 589 void Trace::MethodEntered(Thread* thread, mirror::Object* this_object, 590 mirror::ArtMethod* method, uint32_t dex_pc) { 591 uint32_t thread_clock_diff = 0; 592 uint32_t wall_clock_diff = 0; 593 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 594 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered, 595 thread_clock_diff, wall_clock_diff); 596 } 597 598 void Trace::MethodExited(Thread* thread, mirror::Object* this_object, 599 mirror::ArtMethod* method, uint32_t dex_pc, 600 const JValue& return_value) { 601 UNUSED(return_value); 602 uint32_t thread_clock_diff = 0; 603 uint32_t wall_clock_diff = 0; 604 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 605 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited, 606 thread_clock_diff, wall_clock_diff); 607 } 608 609 void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object, 610 mirror::ArtMethod* method, uint32_t dex_pc) { 611 uint32_t thread_clock_diff = 0; 612 uint32_t wall_clock_diff = 0; 613 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 614 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind, 615 thread_clock_diff, wall_clock_diff); 616 } 617 618 void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, 619 mirror::ArtMethod* catch_method, uint32_t catch_dex_pc, 620 mirror::Throwable* exception_object) 621 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 622 LOG(ERROR) << "Unexpected exception caught event in tracing"; 623 } 624 625 void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) { 626 if (UseThreadCpuClock()) { 627 uint64_t clock_base = thread->GetTraceClockBase(); 628 if (UNLIKELY(clock_base == 0)) { 629 // First event, record the base time in the map. 630 uint64_t time = thread->GetCpuMicroTime(); 631 thread->SetTraceClockBase(time); 632 } else { 633 *thread_clock_diff = thread->GetCpuMicroTime() - clock_base; 634 } 635 } 636 if (UseWallClock()) { 637 *wall_clock_diff = MicroTime() - start_time_; 638 } 639 } 640 641 void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, 642 instrumentation::Instrumentation::InstrumentationEvent event, 643 uint32_t thread_clock_diff, uint32_t wall_clock_diff) { 644 // Advance cur_offset_ atomically. 645 int32_t new_offset; 646 int32_t old_offset; 647 do { 648 old_offset = cur_offset_.LoadRelaxed(); 649 new_offset = old_offset + GetRecordSize(clock_source_); 650 if (new_offset > buffer_size_) { 651 overflow_ = true; 652 return; 653 } 654 } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset)); 655 656 TraceAction action = kTraceMethodEnter; 657 switch (event) { 658 case instrumentation::Instrumentation::kMethodEntered: 659 action = kTraceMethodEnter; 660 break; 661 case instrumentation::Instrumentation::kMethodExited: 662 action = kTraceMethodExit; 663 break; 664 case instrumentation::Instrumentation::kMethodUnwind: 665 action = kTraceUnroll; 666 break; 667 default: 668 UNIMPLEMENTED(FATAL) << "Unexpected event: " << event; 669 } 670 671 uint32_t method_value = EncodeTraceMethodAndAction(method, action); 672 673 // Write data 674 uint8_t* ptr = buf_.get() + old_offset; 675 Append2LE(ptr, thread->GetTid()); 676 Append4LE(ptr + 2, method_value); 677 ptr += 6; 678 679 if (UseThreadCpuClock()) { 680 Append4LE(ptr, thread_clock_diff); 681 ptr += 4; 682 } 683 if (UseWallClock()) { 684 Append4LE(ptr, wall_clock_diff); 685 } 686 } 687 688 void Trace::GetVisitedMethods(size_t buf_size, 689 std::set<mirror::ArtMethod*>* visited_methods) { 690 uint8_t* ptr = buf_.get() + kTraceHeaderLength; 691 uint8_t* end = buf_.get() + buf_size; 692 693 while (ptr < end) { 694 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 695 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 696 visited_methods->insert(method); 697 ptr += GetRecordSize(clock_source_); 698 } 699 } 700 701 void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) { 702 for (const auto& method : visited_methods) { 703 os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, 704 PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(), 705 method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile()); 706 } 707 } 708 709 static void DumpThread(Thread* t, void* arg) { 710 std::ostream& os = *reinterpret_cast<std::ostream*>(arg); 711 std::string name; 712 t->GetThreadName(name); 713 os << t->GetTid() << "\t" << name << "\n"; 714 } 715 716 void Trace::DumpThreadList(std::ostream& os) { 717 Thread* self = Thread::Current(); 718 for (auto it : exited_threads_) { 719 os << it.first << "\t" << it.second << "\n"; 720 } 721 Locks::thread_list_lock_->AssertNotHeld(self); 722 MutexLock mu(self, *Locks::thread_list_lock_); 723 Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os); 724 } 725 726 void Trace::StoreExitingThreadInfo(Thread* thread) { 727 MutexLock mu(thread, *Locks::trace_lock_); 728 if (the_trace_ != nullptr) { 729 std::string name; 730 thread->GetThreadName(name); 731 the_trace_->exited_threads_.Put(thread->GetTid(), name); 732 } 733 } 734 735 } // namespace art 736