1 // Copyright 2015 The Chromium Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #include "base/trace_event/trace_log.h" 6 7 #include <algorithm> 8 #include <cmath> 9 #include <memory> 10 #include <utility> 11 12 #include "base/base_switches.h" 13 #include "base/bind.h" 14 #include "base/command_line.h" 15 #include "base/debug/leak_annotations.h" 16 #include "base/lazy_instance.h" 17 #include "base/location.h" 18 #include "base/macros.h" 19 #include "base/memory/ref_counted_memory.h" 20 #include "base/memory/singleton.h" 21 #include "base/process/process_metrics.h" 22 #include "base/stl_util.h" 23 #include "base/strings/string_split.h" 24 #include "base/strings/string_tokenizer.h" 25 #include "base/strings/stringprintf.h" 26 #include "base/sys_info.h" 27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" 28 #include "base/threading/platform_thread.h" 29 #include "base/threading/thread_id_name_manager.h" 30 #include "base/threading/thread_task_runner_handle.h" 31 #include "base/threading/worker_pool.h" 32 #include "base/time/time.h" 33 #include "base/trace_event/heap_profiler.h" 34 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 35 #include "base/trace_event/memory_dump_manager.h" 36 #include "base/trace_event/memory_dump_provider.h" 37 #include "base/trace_event/process_memory_dump.h" 38 #include "base/trace_event/trace_buffer.h" 39 #include "base/trace_event/trace_event.h" 40 #include "base/trace_event/trace_event_synthetic_delay.h" 41 #include "base/trace_event/trace_sampling_thread.h" 42 #include "build/build_config.h" 43 44 #if defined(OS_WIN) 45 #include "base/trace_event/trace_event_etw_export_win.h" 46 #endif 47 48 // The thread buckets for the sampling profiler. 49 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; 50 51 namespace base { 52 namespace internal { 53 54 class DeleteTraceLogForTesting { 55 public: 56 static void Delete() { 57 Singleton<trace_event::TraceLog, 58 LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0); 59 } 60 }; 61 62 } // namespace internal 63 64 namespace trace_event { 65 66 namespace { 67 68 // Controls the number of trace events we will buffer in-memory 69 // before throwing them away. 70 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize; 71 72 const size_t kTraceEventVectorBigBufferChunks = 73 512000000 / kTraceBufferChunkSize; 74 static_assert( 75 kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex, 76 "Too many big buffer chunks"); 77 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; 78 static_assert( 79 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, 80 "Too many vector buffer chunks"); 81 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; 82 83 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. 84 const size_t kEchoToConsoleTraceEventBufferChunks = 256; 85 86 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; 87 const int kThreadFlushTimeoutMs = 3000; 88 89 #define MAX_CATEGORY_GROUPS 200 90 91 // Parallel arrays g_category_groups and g_category_group_enabled are separate 92 // so that a pointer to a member of g_category_group_enabled can be easily 93 // converted to an index into g_category_groups. This allows macros to deal 94 // only with char enabled pointers from g_category_group_enabled, and we can 95 // convert internally to determine the category name from the char enabled 96 // pointer. 97 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { 98 "toplevel", 99 "tracing already shutdown", 100 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", 101 "__metadata"}; 102 103 // The enabled flag is char instead of bool so that the API can be used from C. 104 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; 105 // Indexes here have to match the g_category_groups array indexes above. 106 const int g_category_already_shutdown = 1; 107 const int g_category_categories_exhausted = 2; 108 const int g_category_metadata = 3; 109 const int g_num_builtin_categories = 4; 110 // Skip default categories. 111 base::subtle::AtomicWord g_category_index = g_num_builtin_categories; 112 113 // The name of the current thread. This is used to decide if the current 114 // thread name has changed. We combine all the seen thread names into the 115 // output name for the thread. 116 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name = 117 LAZY_INSTANCE_INITIALIZER; 118 119 ThreadTicks ThreadNow() { 120 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks(); 121 } 122 123 template <typename T> 124 void InitializeMetadataEvent(TraceEvent* trace_event, 125 int thread_id, 126 const char* metadata_name, 127 const char* arg_name, 128 const T& value) { 129 if (!trace_event) 130 return; 131 132 int num_args = 1; 133 unsigned char arg_type; 134 unsigned long long arg_value; 135 ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value); 136 trace_event->Initialize( 137 thread_id, 138 TimeTicks(), 139 ThreadTicks(), 140 TRACE_EVENT_PHASE_METADATA, 141 &g_category_group_enabled[g_category_metadata], 142 metadata_name, 143 trace_event_internal::kGlobalScope, // scope 144 trace_event_internal::kNoId, // id 145 trace_event_internal::kNoId, // bind_id 146 num_args, 147 &arg_name, 148 &arg_type, 149 &arg_value, 150 nullptr, 151 TRACE_EVENT_FLAG_NONE); 152 } 153 154 class AutoThreadLocalBoolean { 155 public: 156 explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean) 157 : thread_local_boolean_(thread_local_boolean) { 158 DCHECK(!thread_local_boolean_->Get()); 159 thread_local_boolean_->Set(true); 160 } 161 ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); } 162 163 private: 164 ThreadLocalBoolean* thread_local_boolean_; 165 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean); 166 }; 167 168 // Use this function instead of TraceEventHandle constructor to keep the 169 // overhead of ScopedTracer (trace_event.h) constructor minimum. 170 void MakeHandle(uint32_t chunk_seq, 171 size_t chunk_index, 172 size_t event_index, 173 TraceEventHandle* handle) { 174 DCHECK(chunk_seq); 175 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); 176 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); 177 handle->chunk_seq = chunk_seq; 178 handle->chunk_index = static_cast<uint16_t>(chunk_index); 179 handle->event_index = static_cast<uint16_t>(event_index); 180 } 181 182 } // namespace 183 184 // A helper class that allows the lock to be acquired in the middle of the scope 185 // and unlocks at the end of scope if locked. 186 class TraceLog::OptionalAutoLock { 187 public: 188 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} 189 190 ~OptionalAutoLock() { 191 if (locked_) 192 lock_->Release(); 193 } 194 195 void EnsureAcquired() { 196 if (!locked_) { 197 lock_->Acquire(); 198 locked_ = true; 199 } 200 } 201 202 private: 203 Lock* lock_; 204 bool locked_; 205 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock); 206 }; 207 208 class TraceLog::ThreadLocalEventBuffer 209 : public MessageLoop::DestructionObserver, 210 public MemoryDumpProvider { 211 public: 212 explicit ThreadLocalEventBuffer(TraceLog* trace_log); 213 ~ThreadLocalEventBuffer() override; 214 215 TraceEvent* AddTraceEvent(TraceEventHandle* handle); 216 217 TraceEvent* GetEventByHandle(TraceEventHandle handle) { 218 if (!chunk_ || handle.chunk_seq != chunk_->seq() || 219 handle.chunk_index != chunk_index_) { 220 return nullptr; 221 } 222 223 return chunk_->GetEventAt(handle.event_index); 224 } 225 226 int generation() const { return generation_; } 227 228 private: 229 // MessageLoop::DestructionObserver 230 void WillDestroyCurrentMessageLoop() override; 231 232 // MemoryDumpProvider implementation. 233 bool OnMemoryDump(const MemoryDumpArgs& args, 234 ProcessMemoryDump* pmd) override; 235 236 void FlushWhileLocked(); 237 238 void CheckThisIsCurrentBuffer() const { 239 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this); 240 } 241 242 // Since TraceLog is a leaky singleton, trace_log_ will always be valid 243 // as long as the thread exists. 244 TraceLog* trace_log_; 245 std::unique_ptr<TraceBufferChunk> chunk_; 246 size_t chunk_index_; 247 int generation_; 248 249 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer); 250 }; 251 252 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log) 253 : trace_log_(trace_log), 254 chunk_index_(0), 255 generation_(trace_log->generation()) { 256 // ThreadLocalEventBuffer is created only if the thread has a message loop, so 257 // the following message_loop won't be NULL. 258 MessageLoop* message_loop = MessageLoop::current(); 259 message_loop->AddDestructionObserver(this); 260 261 // This is to report the local memory usage when memory-infra is enabled. 262 MemoryDumpManager::GetInstance()->RegisterDumpProvider( 263 this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get()); 264 265 AutoLock lock(trace_log->lock_); 266 trace_log->thread_message_loops_.insert(message_loop); 267 } 268 269 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() { 270 CheckThisIsCurrentBuffer(); 271 MessageLoop::current()->RemoveDestructionObserver(this); 272 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this); 273 274 { 275 AutoLock lock(trace_log_->lock_); 276 FlushWhileLocked(); 277 trace_log_->thread_message_loops_.erase(MessageLoop::current()); 278 } 279 trace_log_->thread_local_event_buffer_.Set(NULL); 280 } 281 282 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent( 283 TraceEventHandle* handle) { 284 CheckThisIsCurrentBuffer(); 285 286 if (chunk_ && chunk_->IsFull()) { 287 AutoLock lock(trace_log_->lock_); 288 FlushWhileLocked(); 289 chunk_.reset(); 290 } 291 if (!chunk_) { 292 AutoLock lock(trace_log_->lock_); 293 chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_); 294 trace_log_->CheckIfBufferIsFullWhileLocked(); 295 } 296 if (!chunk_) 297 return NULL; 298 299 size_t event_index; 300 TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index); 301 if (trace_event && handle) 302 MakeHandle(chunk_->seq(), chunk_index_, event_index, handle); 303 304 return trace_event; 305 } 306 307 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() { 308 delete this; 309 } 310 311 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs&, 312 ProcessMemoryDump* pmd) { 313 if (!chunk_) 314 return true; 315 std::string dump_base_name = StringPrintf( 316 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId())); 317 TraceEventMemoryOverhead overhead; 318 chunk_->EstimateTraceMemoryOverhead(&overhead); 319 overhead.DumpInto(dump_base_name.c_str(), pmd); 320 return true; 321 } 322 323 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() { 324 if (!chunk_) 325 return; 326 327 trace_log_->lock_.AssertAcquired(); 328 if (trace_log_->CheckGeneration(generation_)) { 329 // Return the chunk to the buffer only if the generation matches. 330 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_)); 331 } 332 // Otherwise this method may be called from the destructor, or TraceLog will 333 // find the generation mismatch and delete this buffer soon. 334 } 335 336 struct TraceLog::RegisteredAsyncObserver { 337 explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer) 338 : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {} 339 ~RegisteredAsyncObserver() {} 340 341 WeakPtr<AsyncEnabledStateObserver> observer; 342 scoped_refptr<SequencedTaskRunner> task_runner; 343 }; 344 345 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {} 346 347 TraceLogStatus::~TraceLogStatus() {} 348 349 // static 350 TraceLog* TraceLog::GetInstance() { 351 return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get(); 352 } 353 354 TraceLog::TraceLog() 355 : mode_(DISABLED), 356 num_traces_recorded_(0), 357 event_callback_(0), 358 dispatching_to_observer_list_(false), 359 process_sort_index_(0), 360 process_id_hash_(0), 361 process_id_(0), 362 watch_category_(0), 363 trace_options_(kInternalRecordUntilFull), 364 sampling_thread_handle_(0), 365 trace_config_(TraceConfig()), 366 event_callback_trace_config_(TraceConfig()), 367 thread_shared_chunk_index_(0), 368 generation_(0), 369 use_worker_thread_(false) { 370 // Trace is enabled or disabled on one thread while other threads are 371 // accessing the enabled flag. We don't care whether edge-case events are 372 // traced or not, so we allow races on the enabled flag to keep the trace 373 // macros fast. 374 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 375 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, 376 // sizeof(g_category_group_enabled), 377 // "trace_event category enabled"); 378 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { 379 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], 380 "trace_event category enabled"); 381 } 382 #if defined(OS_NACL) // NaCl shouldn't expose the process id. 383 SetProcessID(0); 384 #else 385 SetProcessID(static_cast<int>(GetCurrentProcId())); 386 #endif 387 388 logged_events_.reset(CreateTraceBuffer()); 389 390 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog", 391 nullptr); 392 } 393 394 TraceLog::~TraceLog() {} 395 396 void TraceLog::InitializeThreadLocalEventBufferIfSupported() { 397 // A ThreadLocalEventBuffer needs the message loop 398 // - to know when the thread exits; 399 // - to handle the final flush. 400 // For a thread without a message loop or the message loop may be blocked, the 401 // trace events will be added into the main buffer directly. 402 if (thread_blocks_message_loop_.Get() || !MessageLoop::current()) 403 return; 404 HEAP_PROFILER_SCOPED_IGNORE; 405 auto* thread_local_event_buffer = thread_local_event_buffer_.Get(); 406 if (thread_local_event_buffer && 407 !CheckGeneration(thread_local_event_buffer->generation())) { 408 delete thread_local_event_buffer; 409 thread_local_event_buffer = NULL; 410 } 411 if (!thread_local_event_buffer) { 412 thread_local_event_buffer = new ThreadLocalEventBuffer(this); 413 thread_local_event_buffer_.Set(thread_local_event_buffer); 414 } 415 } 416 417 bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) { 418 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested 419 // (crbug.com/499731). 420 TraceEventMemoryOverhead overhead; 421 overhead.Add("TraceLog", sizeof(*this)); 422 { 423 AutoLock lock(lock_); 424 if (logged_events_) 425 logged_events_->EstimateTraceMemoryOverhead(&overhead); 426 427 for (auto& metadata_event : metadata_events_) 428 metadata_event->EstimateTraceMemoryOverhead(&overhead); 429 } 430 overhead.AddSelf(); 431 overhead.DumpInto("tracing/main_trace_log", pmd); 432 return true; 433 } 434 435 const unsigned char* TraceLog::GetCategoryGroupEnabled( 436 const char* category_group) { 437 TraceLog* tracelog = GetInstance(); 438 if (!tracelog) { 439 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); 440 return &g_category_group_enabled[g_category_already_shutdown]; 441 } 442 return tracelog->GetCategoryGroupEnabledInternal(category_group); 443 } 444 445 const char* TraceLog::GetCategoryGroupName( 446 const unsigned char* category_group_enabled) { 447 // Calculate the index of the category group by finding 448 // category_group_enabled in g_category_group_enabled array. 449 uintptr_t category_begin = 450 reinterpret_cast<uintptr_t>(g_category_group_enabled); 451 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); 452 DCHECK(category_ptr >= category_begin && 453 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + 454 MAX_CATEGORY_GROUPS)) 455 << "out of bounds category pointer"; 456 uintptr_t category_index = 457 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); 458 return g_category_groups[category_index]; 459 } 460 461 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { 462 unsigned char enabled_flag = 0; 463 const char* category_group = g_category_groups[category_index]; 464 if (mode_ == RECORDING_MODE && 465 trace_config_.IsCategoryGroupEnabled(category_group)) { 466 enabled_flag |= ENABLED_FOR_RECORDING; 467 } 468 469 if (event_callback_ && 470 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { 471 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; 472 } 473 474 #if defined(OS_WIN) 475 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( 476 category_group)) { 477 enabled_flag |= ENABLED_FOR_ETW_EXPORT; 478 } 479 #endif 480 481 // TODO(primiano): this is a temporary workaround for catapult:#2341, 482 // to guarantee that metadata events are always added even if the category 483 // filter is "-*". See crbug.com/618054 for more details and long-term fix. 484 if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) 485 enabled_flag |= ENABLED_FOR_RECORDING; 486 487 g_category_group_enabled[category_index] = enabled_flag; 488 } 489 490 void TraceLog::UpdateCategoryGroupEnabledFlags() { 491 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); 492 for (size_t i = 0; i < category_index; i++) 493 UpdateCategoryGroupEnabledFlag(i); 494 } 495 496 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { 497 ResetTraceEventSyntheticDelays(); 498 const TraceConfig::StringList& delays = 499 trace_config_.GetSyntheticDelayValues(); 500 TraceConfig::StringList::const_iterator ci; 501 for (ci = delays.begin(); ci != delays.end(); ++ci) { 502 StringTokenizer tokens(*ci, ";"); 503 if (!tokens.GetNext()) 504 continue; 505 TraceEventSyntheticDelay* delay = 506 TraceEventSyntheticDelay::Lookup(tokens.token()); 507 while (tokens.GetNext()) { 508 std::string token = tokens.token(); 509 char* duration_end; 510 double target_duration = strtod(token.c_str(), &duration_end); 511 if (duration_end != token.c_str()) { 512 delay->SetTargetDuration(TimeDelta::FromMicroseconds( 513 static_cast<int64_t>(target_duration * 1e6))); 514 } else if (token == "static") { 515 delay->SetMode(TraceEventSyntheticDelay::STATIC); 516 } else if (token == "oneshot") { 517 delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT); 518 } else if (token == "alternating") { 519 delay->SetMode(TraceEventSyntheticDelay::ALTERNATING); 520 } 521 } 522 } 523 } 524 525 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal( 526 const char* category_group) { 527 DCHECK(!strchr(category_group, '"')) 528 << "Category groups may not contain double quote"; 529 // The g_category_groups is append only, avoid using a lock for the fast path. 530 size_t current_category_index = base::subtle::Acquire_Load(&g_category_index); 531 532 // Search for pre-existing category group. 533 for (size_t i = 0; i < current_category_index; ++i) { 534 if (strcmp(g_category_groups[i], category_group) == 0) { 535 return &g_category_group_enabled[i]; 536 } 537 } 538 539 unsigned char* category_group_enabled = NULL; 540 // This is the slow path: the lock is not held in the case above, so more 541 // than one thread could have reached here trying to add the same category. 542 // Only hold to lock when actually appending a new category, and 543 // check the categories groups again. 544 AutoLock lock(lock_); 545 size_t category_index = base::subtle::Acquire_Load(&g_category_index); 546 for (size_t i = 0; i < category_index; ++i) { 547 if (strcmp(g_category_groups[i], category_group) == 0) { 548 return &g_category_group_enabled[i]; 549 } 550 } 551 552 // Create a new category group. 553 DCHECK(category_index < MAX_CATEGORY_GROUPS) 554 << "must increase MAX_CATEGORY_GROUPS"; 555 if (category_index < MAX_CATEGORY_GROUPS) { 556 // Don't hold on to the category_group pointer, so that we can create 557 // category groups with strings not known at compile time (this is 558 // required by SetWatchEvent). 559 const char* new_group = strdup(category_group); 560 ANNOTATE_LEAKING_OBJECT_PTR(new_group); 561 g_category_groups[category_index] = new_group; 562 DCHECK(!g_category_group_enabled[category_index]); 563 // Note that if both included and excluded patterns in the 564 // TraceConfig are empty, we exclude nothing, 565 // thereby enabling this category group. 566 UpdateCategoryGroupEnabledFlag(category_index); 567 category_group_enabled = &g_category_group_enabled[category_index]; 568 // Update the max index now. 569 base::subtle::Release_Store(&g_category_index, category_index + 1); 570 } else { 571 category_group_enabled = 572 &g_category_group_enabled[g_category_categories_exhausted]; 573 } 574 return category_group_enabled; 575 } 576 577 void TraceLog::GetKnownCategoryGroups( 578 std::vector<std::string>* category_groups) { 579 AutoLock lock(lock_); 580 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); 581 for (size_t i = g_num_builtin_categories; i < category_index; i++) 582 category_groups->push_back(g_category_groups[i]); 583 } 584 585 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) { 586 std::vector<EnabledStateObserver*> observer_list; 587 std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map; 588 { 589 AutoLock lock(lock_); 590 591 // Can't enable tracing when Flush() is in progress. 592 DCHECK(!flush_task_runner_); 593 594 InternalTraceOptions new_options = 595 GetInternalOptionsFromTraceConfig(trace_config); 596 597 InternalTraceOptions old_options = trace_options(); 598 599 if (IsEnabled()) { 600 if (new_options != old_options) { 601 DLOG(ERROR) << "Attempting to re-enable tracing with a different " 602 << "set of options."; 603 } 604 605 if (mode != mode_) { 606 DLOG(ERROR) << "Attempting to re-enable tracing with a different mode."; 607 } 608 609 trace_config_.Merge(trace_config); 610 UpdateCategoryGroupEnabledFlags(); 611 return; 612 } 613 614 if (dispatching_to_observer_list_) { 615 DLOG(ERROR) 616 << "Cannot manipulate TraceLog::Enabled state from an observer."; 617 return; 618 } 619 620 mode_ = mode; 621 622 if (new_options != old_options) { 623 subtle::NoBarrier_Store(&trace_options_, new_options); 624 UseNextTraceBuffer(); 625 } 626 627 num_traces_recorded_++; 628 629 trace_config_ = TraceConfig(trace_config); 630 UpdateCategoryGroupEnabledFlags(); 631 UpdateSyntheticDelaysFromTraceConfig(); 632 633 if (new_options & kInternalEnableSampling) { 634 sampling_thread_.reset(new TraceSamplingThread); 635 sampling_thread_->RegisterSampleBucket( 636 &g_trace_state[0], "bucket0", 637 Bind(&TraceSamplingThread::DefaultSamplingCallback)); 638 sampling_thread_->RegisterSampleBucket( 639 &g_trace_state[1], "bucket1", 640 Bind(&TraceSamplingThread::DefaultSamplingCallback)); 641 sampling_thread_->RegisterSampleBucket( 642 &g_trace_state[2], "bucket2", 643 Bind(&TraceSamplingThread::DefaultSamplingCallback)); 644 if (!PlatformThread::Create(0, sampling_thread_.get(), 645 &sampling_thread_handle_)) { 646 DCHECK(false) << "failed to create thread"; 647 } 648 } 649 650 dispatching_to_observer_list_ = true; 651 observer_list = enabled_state_observer_list_; 652 observer_map = async_observers_; 653 } 654 // Notify observers outside the lock in case they trigger trace events. 655 for (size_t i = 0; i < observer_list.size(); ++i) 656 observer_list[i]->OnTraceLogEnabled(); 657 for (const auto& it : observer_map) { 658 it.second.task_runner->PostTask( 659 FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled, 660 it.second.observer)); 661 } 662 663 { 664 AutoLock lock(lock_); 665 dispatching_to_observer_list_ = false; 666 } 667 } 668 669 void TraceLog::SetArgumentFilterPredicate( 670 const ArgumentFilterPredicate& argument_filter_predicate) { 671 AutoLock lock(lock_); 672 DCHECK(!argument_filter_predicate.is_null()); 673 DCHECK(argument_filter_predicate_.is_null()); 674 argument_filter_predicate_ = argument_filter_predicate; 675 } 676 677 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig( 678 const TraceConfig& config) { 679 InternalTraceOptions ret = 680 config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone; 681 if (config.IsArgumentFilterEnabled()) 682 ret |= kInternalEnableArgumentFilter; 683 switch (config.GetTraceRecordMode()) { 684 case RECORD_UNTIL_FULL: 685 return ret | kInternalRecordUntilFull; 686 case RECORD_CONTINUOUSLY: 687 return ret | kInternalRecordContinuously; 688 case ECHO_TO_CONSOLE: 689 return ret | kInternalEchoToConsole; 690 case RECORD_AS_MUCH_AS_POSSIBLE: 691 return ret | kInternalRecordAsMuchAsPossible; 692 } 693 NOTREACHED(); 694 return kInternalNone; 695 } 696 697 TraceConfig TraceLog::GetCurrentTraceConfig() const { 698 AutoLock lock(lock_); 699 return trace_config_; 700 } 701 702 void TraceLog::SetDisabled() { 703 AutoLock lock(lock_); 704 SetDisabledWhileLocked(); 705 } 706 707 void TraceLog::SetDisabledWhileLocked() { 708 lock_.AssertAcquired(); 709 710 if (!IsEnabled()) 711 return; 712 713 if (dispatching_to_observer_list_) { 714 DLOG(ERROR) 715 << "Cannot manipulate TraceLog::Enabled state from an observer."; 716 return; 717 } 718 719 mode_ = DISABLED; 720 721 if (sampling_thread_.get()) { 722 // Stop the sampling thread. 723 sampling_thread_->Stop(); 724 lock_.Release(); 725 PlatformThread::Join(sampling_thread_handle_); 726 lock_.Acquire(); 727 sampling_thread_handle_ = PlatformThreadHandle(); 728 sampling_thread_.reset(); 729 } 730 731 trace_config_.Clear(); 732 subtle::NoBarrier_Store(&watch_category_, 0); 733 watch_event_name_ = ""; 734 UpdateCategoryGroupEnabledFlags(); 735 AddMetadataEventsWhileLocked(); 736 737 // Remove metadata events so they will not get added to a subsequent trace. 738 metadata_events_.clear(); 739 740 dispatching_to_observer_list_ = true; 741 std::vector<EnabledStateObserver*> observer_list = 742 enabled_state_observer_list_; 743 std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map = 744 async_observers_; 745 746 { 747 // Dispatch to observers outside the lock in case the observer triggers a 748 // trace event. 749 AutoUnlock unlock(lock_); 750 for (size_t i = 0; i < observer_list.size(); ++i) 751 observer_list[i]->OnTraceLogDisabled(); 752 for (const auto& it : observer_map) { 753 it.second.task_runner->PostTask( 754 FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled, 755 it.second.observer)); 756 } 757 } 758 dispatching_to_observer_list_ = false; 759 } 760 761 int TraceLog::GetNumTracesRecorded() { 762 AutoLock lock(lock_); 763 if (!IsEnabled()) 764 return -1; 765 return num_traces_recorded_; 766 } 767 768 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) { 769 AutoLock lock(lock_); 770 enabled_state_observer_list_.push_back(listener); 771 } 772 773 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) { 774 AutoLock lock(lock_); 775 std::vector<EnabledStateObserver*>::iterator it = 776 std::find(enabled_state_observer_list_.begin(), 777 enabled_state_observer_list_.end(), listener); 778 if (it != enabled_state_observer_list_.end()) 779 enabled_state_observer_list_.erase(it); 780 } 781 782 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const { 783 AutoLock lock(lock_); 784 return ContainsValue(enabled_state_observer_list_, listener); 785 } 786 787 TraceLogStatus TraceLog::GetStatus() const { 788 AutoLock lock(lock_); 789 TraceLogStatus result; 790 result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity()); 791 result.event_count = static_cast<uint32_t>(logged_events_->Size()); 792 return result; 793 } 794 795 bool TraceLog::BufferIsFull() const { 796 AutoLock lock(lock_); 797 return logged_events_->IsFull(); 798 } 799 800 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked( 801 TraceEventHandle* handle, 802 bool check_buffer_is_full) { 803 lock_.AssertAcquired(); 804 805 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) { 806 logged_events_->ReturnChunk(thread_shared_chunk_index_, 807 std::move(thread_shared_chunk_)); 808 } 809 810 if (!thread_shared_chunk_) { 811 thread_shared_chunk_ = 812 logged_events_->GetChunk(&thread_shared_chunk_index_); 813 if (check_buffer_is_full) 814 CheckIfBufferIsFullWhileLocked(); 815 } 816 if (!thread_shared_chunk_) 817 return NULL; 818 819 size_t event_index; 820 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index); 821 if (trace_event && handle) { 822 MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_, 823 event_index, handle); 824 } 825 return trace_event; 826 } 827 828 void TraceLog::CheckIfBufferIsFullWhileLocked() { 829 lock_.AssertAcquired(); 830 if (logged_events_->IsFull()) { 831 if (buffer_limit_reached_timestamp_.is_null()) { 832 buffer_limit_reached_timestamp_ = OffsetNow(); 833 } 834 SetDisabledWhileLocked(); 835 } 836 } 837 838 void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config, 839 EventCallback cb) { 840 AutoLock lock(lock_); 841 subtle::NoBarrier_Store(&event_callback_, 842 reinterpret_cast<subtle::AtomicWord>(cb)); 843 event_callback_trace_config_ = trace_config; 844 UpdateCategoryGroupEnabledFlags(); 845 } 846 847 void TraceLog::SetEventCallbackDisabled() { 848 AutoLock lock(lock_); 849 subtle::NoBarrier_Store(&event_callback_, 0); 850 UpdateCategoryGroupEnabledFlags(); 851 } 852 853 // Flush() works as the following: 854 // 1. Flush() is called in thread A whose task runner is saved in 855 // flush_task_runner_; 856 // 2. If thread_message_loops_ is not empty, thread A posts task to each message 857 // loop to flush the thread local buffers; otherwise finish the flush; 858 // 3. FlushCurrentThread() deletes the thread local event buffer: 859 // - The last batch of events of the thread are flushed into the main buffer; 860 // - The message loop will be removed from thread_message_loops_; 861 // If this is the last message loop, finish the flush; 862 // 4. If any thread hasn't finish its flush in time, finish the flush. 863 void TraceLog::Flush(const TraceLog::OutputCallback& cb, 864 bool use_worker_thread) { 865 FlushInternal(cb, use_worker_thread, false); 866 } 867 868 void TraceLog::CancelTracing(const OutputCallback& cb) { 869 SetDisabled(); 870 FlushInternal(cb, false, true); 871 } 872 873 void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb, 874 bool use_worker_thread, 875 bool discard_events) { 876 use_worker_thread_ = use_worker_thread; 877 if (IsEnabled()) { 878 // Can't flush when tracing is enabled because otherwise PostTask would 879 // - generate more trace events; 880 // - deschedule the calling thread on some platforms causing inaccurate 881 // timing of the trace events. 882 scoped_refptr<RefCountedString> empty_result = new RefCountedString; 883 if (!cb.is_null()) 884 cb.Run(empty_result, false); 885 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled"; 886 return; 887 } 888 889 int generation = this->generation(); 890 // Copy of thread_message_loops_ to be used without locking. 891 std::vector<scoped_refptr<SingleThreadTaskRunner>> 892 thread_message_loop_task_runners; 893 { 894 AutoLock lock(lock_); 895 DCHECK(!flush_task_runner_); 896 flush_task_runner_ = ThreadTaskRunnerHandle::IsSet() 897 ? ThreadTaskRunnerHandle::Get() 898 : nullptr; 899 DCHECK(thread_message_loops_.empty() || flush_task_runner_); 900 flush_output_callback_ = cb; 901 902 if (thread_shared_chunk_) { 903 logged_events_->ReturnChunk(thread_shared_chunk_index_, 904 std::move(thread_shared_chunk_)); 905 } 906 907 if (thread_message_loops_.size()) { 908 for (hash_set<MessageLoop*>::const_iterator it = 909 thread_message_loops_.begin(); 910 it != thread_message_loops_.end(); ++it) { 911 thread_message_loop_task_runners.push_back((*it)->task_runner()); 912 } 913 } 914 } 915 916 if (thread_message_loop_task_runners.size()) { 917 for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) { 918 thread_message_loop_task_runners[i]->PostTask( 919 FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this), 920 generation, discard_events)); 921 } 922 flush_task_runner_->PostDelayedTask( 923 FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation, 924 discard_events), 925 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); 926 return; 927 } 928 929 FinishFlush(generation, discard_events); 930 } 931 932 // Usually it runs on a different thread. 933 void TraceLog::ConvertTraceEventsToTraceFormat( 934 std::unique_ptr<TraceBuffer> logged_events, 935 const OutputCallback& flush_output_callback, 936 const ArgumentFilterPredicate& argument_filter_predicate) { 937 if (flush_output_callback.is_null()) 938 return; 939 940 HEAP_PROFILER_SCOPED_IGNORE; 941 // The callback need to be called at least once even if there is no events 942 // to let the caller know the completion of flush. 943 scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString(); 944 while (const TraceBufferChunk* chunk = logged_events->NextChunk()) { 945 for (size_t j = 0; j < chunk->size(); ++j) { 946 size_t size = json_events_str_ptr->size(); 947 if (size > kTraceEventBufferSizeInBytes) { 948 flush_output_callback.Run(json_events_str_ptr, true); 949 json_events_str_ptr = new RefCountedString(); 950 } else if (size) { 951 json_events_str_ptr->data().append(",\n"); 952 } 953 chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()), 954 argument_filter_predicate); 955 } 956 } 957 flush_output_callback.Run(json_events_str_ptr, false); 958 } 959 960 void TraceLog::FinishFlush(int generation, bool discard_events) { 961 std::unique_ptr<TraceBuffer> previous_logged_events; 962 OutputCallback flush_output_callback; 963 ArgumentFilterPredicate argument_filter_predicate; 964 965 if (!CheckGeneration(generation)) 966 return; 967 968 { 969 AutoLock lock(lock_); 970 971 previous_logged_events.swap(logged_events_); 972 UseNextTraceBuffer(); 973 thread_message_loops_.clear(); 974 975 flush_task_runner_ = NULL; 976 flush_output_callback = flush_output_callback_; 977 flush_output_callback_.Reset(); 978 979 if (trace_options() & kInternalEnableArgumentFilter) { 980 CHECK(!argument_filter_predicate_.is_null()); 981 argument_filter_predicate = argument_filter_predicate_; 982 } 983 } 984 985 if (discard_events) { 986 if (!flush_output_callback.is_null()) { 987 scoped_refptr<RefCountedString> empty_result = new RefCountedString; 988 flush_output_callback.Run(empty_result, false); 989 } 990 return; 991 } 992 993 if (use_worker_thread_ && 994 WorkerPool::PostTask( 995 FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat, 996 Passed(&previous_logged_events), 997 flush_output_callback, argument_filter_predicate), 998 true)) { 999 return; 1000 } 1001 1002 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), 1003 flush_output_callback, 1004 argument_filter_predicate); 1005 } 1006 1007 // Run in each thread holding a local event buffer. 1008 void TraceLog::FlushCurrentThread(int generation, bool discard_events) { 1009 { 1010 AutoLock lock(lock_); 1011 if (!CheckGeneration(generation) || !flush_task_runner_) { 1012 // This is late. The corresponding flush has finished. 1013 return; 1014 } 1015 } 1016 1017 // This will flush the thread local buffer. 1018 delete thread_local_event_buffer_.Get(); 1019 1020 AutoLock lock(lock_); 1021 if (!CheckGeneration(generation) || !flush_task_runner_ || 1022 thread_message_loops_.size()) 1023 return; 1024 1025 flush_task_runner_->PostTask( 1026 FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation, 1027 discard_events)); 1028 } 1029 1030 void TraceLog::OnFlushTimeout(int generation, bool discard_events) { 1031 { 1032 AutoLock lock(lock_); 1033 if (!CheckGeneration(generation) || !flush_task_runner_) { 1034 // Flush has finished before timeout. 1035 return; 1036 } 1037 1038 LOG(WARNING) 1039 << "The following threads haven't finished flush in time. " 1040 "If this happens stably for some thread, please call " 1041 "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from " 1042 "the thread to avoid its trace events from being lost."; 1043 for (hash_set<MessageLoop*>::const_iterator it = 1044 thread_message_loops_.begin(); 1045 it != thread_message_loops_.end(); ++it) { 1046 LOG(WARNING) << "Thread: " << (*it)->GetThreadName(); 1047 } 1048 } 1049 FinishFlush(generation, discard_events); 1050 } 1051 1052 void TraceLog::UseNextTraceBuffer() { 1053 logged_events_.reset(CreateTraceBuffer()); 1054 subtle::NoBarrier_AtomicIncrement(&generation_, 1); 1055 thread_shared_chunk_.reset(); 1056 thread_shared_chunk_index_ = 0; 1057 } 1058 1059 TraceEventHandle TraceLog::AddTraceEvent( 1060 char phase, 1061 const unsigned char* category_group_enabled, 1062 const char* name, 1063 const char* scope, 1064 unsigned long long id, 1065 int num_args, 1066 const char** arg_names, 1067 const unsigned char* arg_types, 1068 const unsigned long long* arg_values, 1069 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1070 unsigned int flags) { 1071 int thread_id = static_cast<int>(base::PlatformThread::CurrentId()); 1072 base::TimeTicks now = base::TimeTicks::Now(); 1073 return AddTraceEventWithThreadIdAndTimestamp( 1074 phase, 1075 category_group_enabled, 1076 name, 1077 scope, 1078 id, 1079 trace_event_internal::kNoId, // bind_id 1080 thread_id, 1081 now, 1082 num_args, 1083 arg_names, 1084 arg_types, 1085 arg_values, 1086 convertable_values, 1087 flags); 1088 } 1089 1090 TraceEventHandle TraceLog::AddTraceEventWithBindId( 1091 char phase, 1092 const unsigned char* category_group_enabled, 1093 const char* name, 1094 const char* scope, 1095 unsigned long long id, 1096 unsigned long long bind_id, 1097 int num_args, 1098 const char** arg_names, 1099 const unsigned char* arg_types, 1100 const unsigned long long* arg_values, 1101 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1102 unsigned int flags) { 1103 int thread_id = static_cast<int>(base::PlatformThread::CurrentId()); 1104 base::TimeTicks now = base::TimeTicks::Now(); 1105 return AddTraceEventWithThreadIdAndTimestamp( 1106 phase, 1107 category_group_enabled, 1108 name, 1109 scope, 1110 id, 1111 bind_id, 1112 thread_id, 1113 now, 1114 num_args, 1115 arg_names, 1116 arg_types, 1117 arg_values, 1118 convertable_values, 1119 flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID); 1120 } 1121 1122 TraceEventHandle TraceLog::AddTraceEventWithProcessId( 1123 char phase, 1124 const unsigned char* category_group_enabled, 1125 const char* name, 1126 const char* scope, 1127 unsigned long long id, 1128 int process_id, 1129 int num_args, 1130 const char** arg_names, 1131 const unsigned char* arg_types, 1132 const unsigned long long* arg_values, 1133 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1134 unsigned int flags) { 1135 base::TimeTicks now = base::TimeTicks::Now(); 1136 return AddTraceEventWithThreadIdAndTimestamp( 1137 phase, 1138 category_group_enabled, 1139 name, 1140 scope, 1141 id, 1142 trace_event_internal::kNoId, // bind_id 1143 process_id, 1144 now, 1145 num_args, 1146 arg_names, 1147 arg_types, 1148 arg_values, 1149 convertable_values, 1150 flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID); 1151 } 1152 1153 // Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp 1154 // with kNoId as bind_id 1155 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( 1156 char phase, 1157 const unsigned char* category_group_enabled, 1158 const char* name, 1159 const char* scope, 1160 unsigned long long id, 1161 int thread_id, 1162 const TimeTicks& timestamp, 1163 int num_args, 1164 const char** arg_names, 1165 const unsigned char* arg_types, 1166 const unsigned long long* arg_values, 1167 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1168 unsigned int flags) { 1169 return AddTraceEventWithThreadIdAndTimestamp( 1170 phase, 1171 category_group_enabled, 1172 name, 1173 scope, 1174 id, 1175 trace_event_internal::kNoId, // bind_id 1176 thread_id, 1177 timestamp, 1178 num_args, 1179 arg_names, 1180 arg_types, 1181 arg_values, 1182 convertable_values, 1183 flags); 1184 } 1185 1186 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( 1187 char phase, 1188 const unsigned char* category_group_enabled, 1189 const char* name, 1190 const char* scope, 1191 unsigned long long id, 1192 unsigned long long bind_id, 1193 int thread_id, 1194 const TimeTicks& timestamp, 1195 int num_args, 1196 const char** arg_names, 1197 const unsigned char* arg_types, 1198 const unsigned long long* arg_values, 1199 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1200 unsigned int flags) { 1201 TraceEventHandle handle = {0, 0, 0}; 1202 if (!*category_group_enabled) 1203 return handle; 1204 1205 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when 1206 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> 1207 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... 1208 if (thread_is_in_trace_event_.Get()) 1209 return handle; 1210 1211 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); 1212 1213 DCHECK(name); 1214 DCHECK(!timestamp.is_null()); 1215 1216 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) { 1217 if ((flags & TRACE_EVENT_FLAG_FLOW_IN) || 1218 (flags & TRACE_EVENT_FLAG_FLOW_OUT)) 1219 bind_id = MangleEventId(bind_id); 1220 id = MangleEventId(id); 1221 } 1222 1223 TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp); 1224 ThreadTicks thread_now = ThreadNow(); 1225 1226 // |thread_local_event_buffer_| can be null if the current thread doesn't have 1227 // a message loop or the message loop is blocked. 1228 InitializeThreadLocalEventBufferIfSupported(); 1229 auto* thread_local_event_buffer = thread_local_event_buffer_.Get(); 1230 1231 // Check and update the current thread name only if the event is for the 1232 // current thread to avoid locks in most cases. 1233 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { 1234 const char* new_name = 1235 ThreadIdNameManager::GetInstance()->GetName(thread_id); 1236 // Check if the thread name has been set or changed since the previous 1237 // call (if any), but don't bother if the new name is empty. Note this will 1238 // not detect a thread name change within the same char* buffer address: we 1239 // favor common case performance over corner case correctness. 1240 if (new_name != g_current_thread_name.Get().Get() && new_name && 1241 *new_name) { 1242 g_current_thread_name.Get().Set(new_name); 1243 1244 AutoLock thread_info_lock(thread_info_lock_); 1245 1246 hash_map<int, std::string>::iterator existing_name = 1247 thread_names_.find(thread_id); 1248 if (existing_name == thread_names_.end()) { 1249 // This is a new thread id, and a new name. 1250 thread_names_[thread_id] = new_name; 1251 } else { 1252 // This is a thread id that we've seen before, but potentially with a 1253 // new name. 1254 std::vector<StringPiece> existing_names = base::SplitStringPiece( 1255 existing_name->second, ",", base::KEEP_WHITESPACE, 1256 base::SPLIT_WANT_NONEMPTY); 1257 bool found = std::find(existing_names.begin(), existing_names.end(), 1258 new_name) != existing_names.end(); 1259 if (!found) { 1260 if (existing_names.size()) 1261 existing_name->second.push_back(','); 1262 existing_name->second.append(new_name); 1263 } 1264 } 1265 } 1266 } 1267 1268 #if defined(OS_WIN) 1269 // This is done sooner rather than later, to avoid creating the event and 1270 // acquiring the lock, which is not needed for ETW as it's already threadsafe. 1271 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) 1272 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, 1273 num_args, arg_names, arg_types, arg_values, 1274 convertable_values); 1275 #endif // OS_WIN 1276 1277 std::string console_message; 1278 if (*category_group_enabled & ENABLED_FOR_RECORDING) { 1279 OptionalAutoLock lock(&lock_); 1280 1281 TraceEvent* trace_event = NULL; 1282 if (thread_local_event_buffer) { 1283 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); 1284 } else { 1285 lock.EnsureAcquired(); 1286 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); 1287 } 1288 1289 if (trace_event) { 1290 trace_event->Initialize(thread_id, 1291 offset_event_timestamp, 1292 thread_now, 1293 phase, 1294 category_group_enabled, 1295 name, 1296 scope, 1297 id, 1298 bind_id, 1299 num_args, 1300 arg_names, 1301 arg_types, 1302 arg_values, 1303 convertable_values, 1304 flags); 1305 1306 #if defined(OS_ANDROID) 1307 trace_event->SendToATrace(); 1308 #endif 1309 } 1310 1311 if (trace_options() & kInternalEchoToConsole) { 1312 console_message = EventToConsoleMessage( 1313 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, 1314 timestamp, trace_event); 1315 } 1316 } 1317 1318 if (console_message.size()) 1319 LOG(ERROR) << console_message; 1320 1321 if (reinterpret_cast<const unsigned char*>( 1322 subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) { 1323 bool event_name_matches; 1324 WatchEventCallback watch_event_callback_copy; 1325 { 1326 AutoLock lock(lock_); 1327 event_name_matches = watch_event_name_ == name; 1328 watch_event_callback_copy = watch_event_callback_; 1329 } 1330 if (event_name_matches) { 1331 if (!watch_event_callback_copy.is_null()) 1332 watch_event_callback_copy.Run(); 1333 } 1334 } 1335 1336 if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) { 1337 EventCallback event_callback = reinterpret_cast<EventCallback>( 1338 subtle::NoBarrier_Load(&event_callback_)); 1339 if (event_callback) { 1340 event_callback( 1341 offset_event_timestamp, 1342 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, 1343 category_group_enabled, name, scope, id, num_args, arg_names, 1344 arg_types, arg_values, flags); 1345 } 1346 } 1347 1348 // TODO(primiano): Add support for events with copied name crbug.com/581078 1349 if (!(flags & TRACE_EVENT_FLAG_COPY)) { 1350 if (AllocationContextTracker::capture_mode() == 1351 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { 1352 if (phase == TRACE_EVENT_PHASE_BEGIN || 1353 phase == TRACE_EVENT_PHASE_COMPLETE) { 1354 AllocationContextTracker::GetInstanceForCurrentThread() 1355 ->PushPseudoStackFrame(name); 1356 } else if (phase == TRACE_EVENT_PHASE_END) { 1357 // The pop for |TRACE_EVENT_PHASE_COMPLETE| events 1358 // is in |TraceLog::UpdateTraceEventDuration|. 1359 AllocationContextTracker::GetInstanceForCurrentThread() 1360 ->PopPseudoStackFrame(name); 1361 } 1362 } 1363 } 1364 1365 return handle; 1366 } 1367 1368 void TraceLog::AddMetadataEvent( 1369 const unsigned char* category_group_enabled, 1370 const char* name, 1371 int num_args, 1372 const char** arg_names, 1373 const unsigned char* arg_types, 1374 const unsigned long long* arg_values, 1375 std::unique_ptr<ConvertableToTraceFormat>* convertable_values, 1376 unsigned int flags) { 1377 HEAP_PROFILER_SCOPED_IGNORE; 1378 std::unique_ptr<TraceEvent> trace_event(new TraceEvent); 1379 int thread_id = static_cast<int>(base::PlatformThread::CurrentId()); 1380 ThreadTicks thread_now = ThreadNow(); 1381 TimeTicks now = OffsetNow(); 1382 AutoLock lock(lock_); 1383 trace_event->Initialize( 1384 thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA, 1385 category_group_enabled, name, 1386 trace_event_internal::kGlobalScope, // scope 1387 trace_event_internal::kNoId, // id 1388 trace_event_internal::kNoId, // bind_id 1389 num_args, arg_names, arg_types, arg_values, convertable_values, flags); 1390 metadata_events_.push_back(std::move(trace_event)); 1391 } 1392 1393 // May be called when a COMPELETE event ends and the unfinished event has been 1394 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). 1395 std::string TraceLog::EventToConsoleMessage(unsigned char phase, 1396 const TimeTicks& timestamp, 1397 TraceEvent* trace_event) { 1398 HEAP_PROFILER_SCOPED_IGNORE; 1399 AutoLock thread_info_lock(thread_info_lock_); 1400 1401 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to 1402 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END. 1403 DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE); 1404 1405 TimeDelta duration; 1406 int thread_id = 1407 trace_event ? trace_event->thread_id() : PlatformThread::CurrentId(); 1408 if (phase == TRACE_EVENT_PHASE_END) { 1409 duration = timestamp - thread_event_start_times_[thread_id].top(); 1410 thread_event_start_times_[thread_id].pop(); 1411 } 1412 1413 std::string thread_name = thread_names_[thread_id]; 1414 if (thread_colors_.find(thread_name) == thread_colors_.end()) 1415 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; 1416 1417 std::ostringstream log; 1418 log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(), 1419 thread_colors_[thread_name]); 1420 1421 size_t depth = 0; 1422 if (thread_event_start_times_.find(thread_id) != 1423 thread_event_start_times_.end()) 1424 depth = thread_event_start_times_[thread_id].size(); 1425 1426 for (size_t i = 0; i < depth; ++i) 1427 log << "| "; 1428 1429 if (trace_event) 1430 trace_event->AppendPrettyPrinted(&log); 1431 if (phase == TRACE_EVENT_PHASE_END) 1432 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); 1433 1434 log << "\x1b[0;m"; 1435 1436 if (phase == TRACE_EVENT_PHASE_BEGIN) 1437 thread_event_start_times_[thread_id].push(timestamp); 1438 1439 return log.str(); 1440 } 1441 1442 void TraceLog::UpdateTraceEventDuration( 1443 const unsigned char* category_group_enabled, 1444 const char* name, 1445 TraceEventHandle handle) { 1446 char category_group_enabled_local = *category_group_enabled; 1447 if (!category_group_enabled_local) 1448 return; 1449 1450 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when 1451 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> 1452 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... 1453 if (thread_is_in_trace_event_.Get()) 1454 return; 1455 1456 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); 1457 1458 ThreadTicks thread_now = ThreadNow(); 1459 TimeTicks now = OffsetNow(); 1460 1461 #if defined(OS_WIN) 1462 // Generate an ETW event that marks the end of a complete event. 1463 if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT) 1464 TraceEventETWExport::AddCompleteEndEvent(name); 1465 #endif // OS_WIN 1466 1467 std::string console_message; 1468 if (category_group_enabled_local & ENABLED_FOR_RECORDING) { 1469 OptionalAutoLock lock(&lock_); 1470 1471 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); 1472 if (trace_event) { 1473 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); 1474 trace_event->UpdateDuration(now, thread_now); 1475 #if defined(OS_ANDROID) 1476 trace_event->SendToATrace(); 1477 #endif 1478 } 1479 1480 if (trace_options() & kInternalEchoToConsole) { 1481 console_message = 1482 EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event); 1483 } 1484 1485 if (AllocationContextTracker::capture_mode() == 1486 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { 1487 // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|. 1488 AllocationContextTracker::GetInstanceForCurrentThread() 1489 ->PopPseudoStackFrame(name); 1490 } 1491 } 1492 1493 if (console_message.size()) 1494 LOG(ERROR) << console_message; 1495 1496 if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) { 1497 EventCallback event_callback = reinterpret_cast<EventCallback>( 1498 subtle::NoBarrier_Load(&event_callback_)); 1499 if (event_callback) { 1500 event_callback( 1501 now, TRACE_EVENT_PHASE_END, category_group_enabled, name, 1502 trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0, 1503 nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE); 1504 } 1505 } 1506 } 1507 1508 void TraceLog::SetWatchEvent(const std::string& category_name, 1509 const std::string& event_name, 1510 const WatchEventCallback& callback) { 1511 const unsigned char* category = 1512 GetCategoryGroupEnabled(category_name.c_str()); 1513 AutoLock lock(lock_); 1514 subtle::NoBarrier_Store(&watch_category_, 1515 reinterpret_cast<subtle::AtomicWord>(category)); 1516 watch_event_name_ = event_name; 1517 watch_event_callback_ = callback; 1518 } 1519 1520 void TraceLog::CancelWatchEvent() { 1521 AutoLock lock(lock_); 1522 subtle::NoBarrier_Store(&watch_category_, 0); 1523 watch_event_name_ = ""; 1524 watch_event_callback_.Reset(); 1525 } 1526 1527 uint64_t TraceLog::MangleEventId(uint64_t id) { 1528 return id ^ process_id_hash_; 1529 } 1530 1531 void TraceLog::AddMetadataEventsWhileLocked() { 1532 lock_.AssertAcquired(); 1533 1534 // Move metadata added by |AddMetadataEvent| into the trace log. 1535 while (!metadata_events_.empty()) { 1536 TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false); 1537 event->MoveFrom(std::move(metadata_events_.back())); 1538 metadata_events_.pop_back(); 1539 } 1540 1541 #if !defined(OS_NACL) // NaCl shouldn't expose the process id. 1542 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1543 0, "num_cpus", "number", 1544 base::SysInfo::NumberOfProcessors()); 1545 #endif 1546 1547 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId()); 1548 if (process_sort_index_ != 0) { 1549 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1550 current_thread_id, "process_sort_index", 1551 "sort_index", process_sort_index_); 1552 } 1553 1554 if (process_name_.size()) { 1555 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1556 current_thread_id, "process_name", "name", 1557 process_name_); 1558 } 1559 1560 if (process_labels_.size() > 0) { 1561 std::vector<std::string> labels; 1562 for (base::hash_map<int, std::string>::iterator it = 1563 process_labels_.begin(); 1564 it != process_labels_.end(); it++) { 1565 labels.push_back(it->second); 1566 } 1567 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1568 current_thread_id, "process_labels", "labels", 1569 base::JoinString(labels, ",")); 1570 } 1571 1572 // Thread sort indices. 1573 for (hash_map<int, int>::iterator it = thread_sort_indices_.begin(); 1574 it != thread_sort_indices_.end(); it++) { 1575 if (it->second == 0) 1576 continue; 1577 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1578 it->first, "thread_sort_index", "sort_index", 1579 it->second); 1580 } 1581 1582 // Thread names. 1583 AutoLock thread_info_lock(thread_info_lock_); 1584 for (hash_map<int, std::string>::iterator it = thread_names_.begin(); 1585 it != thread_names_.end(); it++) { 1586 if (it->second.empty()) 1587 continue; 1588 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1589 it->first, "thread_name", "name", it->second); 1590 } 1591 1592 // If buffer is full, add a metadata record to report this. 1593 if (!buffer_limit_reached_timestamp_.is_null()) { 1594 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), 1595 current_thread_id, "trace_buffer_overflowed", 1596 "overflowed_at_ts", 1597 buffer_limit_reached_timestamp_); 1598 } 1599 } 1600 1601 void TraceLog::WaitSamplingEventForTesting() { 1602 if (!sampling_thread_) 1603 return; 1604 sampling_thread_->WaitSamplingEventForTesting(); 1605 } 1606 1607 void TraceLog::DeleteForTesting() { 1608 internal::DeleteTraceLogForTesting::Delete(); 1609 } 1610 1611 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { 1612 return GetEventByHandleInternal(handle, NULL); 1613 } 1614 1615 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, 1616 OptionalAutoLock* lock) { 1617 if (!handle.chunk_seq) 1618 return NULL; 1619 1620 if (thread_local_event_buffer_.Get()) { 1621 TraceEvent* trace_event = 1622 thread_local_event_buffer_.Get()->GetEventByHandle(handle); 1623 if (trace_event) 1624 return trace_event; 1625 } 1626 1627 // The event has been out-of-control of the thread local buffer. 1628 // Try to get the event from the main buffer with a lock. 1629 if (lock) 1630 lock->EnsureAcquired(); 1631 1632 if (thread_shared_chunk_ && 1633 handle.chunk_index == thread_shared_chunk_index_) { 1634 return handle.chunk_seq == thread_shared_chunk_->seq() 1635 ? thread_shared_chunk_->GetEventAt(handle.event_index) 1636 : NULL; 1637 } 1638 1639 return logged_events_->GetEventByHandle(handle); 1640 } 1641 1642 void TraceLog::SetProcessID(int process_id) { 1643 process_id_ = process_id; 1644 // Create a FNV hash from the process ID for XORing. 1645 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details. 1646 unsigned long long offset_basis = 14695981039346656037ull; 1647 unsigned long long fnv_prime = 1099511628211ull; 1648 unsigned long long pid = static_cast<unsigned long long>(process_id_); 1649 process_id_hash_ = (offset_basis ^ pid) * fnv_prime; 1650 } 1651 1652 void TraceLog::SetProcessSortIndex(int sort_index) { 1653 AutoLock lock(lock_); 1654 process_sort_index_ = sort_index; 1655 } 1656 1657 void TraceLog::SetProcessName(const std::string& process_name) { 1658 AutoLock lock(lock_); 1659 process_name_ = process_name; 1660 } 1661 1662 void TraceLog::UpdateProcessLabel(int label_id, 1663 const std::string& current_label) { 1664 if (!current_label.length()) 1665 return RemoveProcessLabel(label_id); 1666 1667 AutoLock lock(lock_); 1668 process_labels_[label_id] = current_label; 1669 } 1670 1671 void TraceLog::RemoveProcessLabel(int label_id) { 1672 AutoLock lock(lock_); 1673 base::hash_map<int, std::string>::iterator it = 1674 process_labels_.find(label_id); 1675 if (it == process_labels_.end()) 1676 return; 1677 1678 process_labels_.erase(it); 1679 } 1680 1681 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) { 1682 AutoLock lock(lock_); 1683 thread_sort_indices_[static_cast<int>(thread_id)] = sort_index; 1684 } 1685 1686 void TraceLog::SetTimeOffset(TimeDelta offset) { 1687 time_offset_ = offset; 1688 } 1689 1690 size_t TraceLog::GetObserverCountForTest() const { 1691 return enabled_state_observer_list_.size(); 1692 } 1693 1694 void TraceLog::SetCurrentThreadBlocksMessageLoop() { 1695 thread_blocks_message_loop_.Set(true); 1696 if (thread_local_event_buffer_.Get()) { 1697 // This will flush the thread local buffer. 1698 delete thread_local_event_buffer_.Get(); 1699 } 1700 } 1701 1702 TraceBuffer* TraceLog::CreateTraceBuffer() { 1703 HEAP_PROFILER_SCOPED_IGNORE; 1704 InternalTraceOptions options = trace_options(); 1705 if (options & kInternalRecordContinuously) 1706 return TraceBuffer::CreateTraceBufferRingBuffer( 1707 kTraceEventRingBufferChunks); 1708 else if (options & kInternalEchoToConsole) 1709 return TraceBuffer::CreateTraceBufferRingBuffer( 1710 kEchoToConsoleTraceEventBufferChunks); 1711 else if (options & kInternalRecordAsMuchAsPossible) 1712 return TraceBuffer::CreateTraceBufferVectorOfSize( 1713 kTraceEventVectorBigBufferChunks); 1714 return TraceBuffer::CreateTraceBufferVectorOfSize( 1715 kTraceEventVectorBufferChunks); 1716 } 1717 1718 #if defined(OS_WIN) 1719 void TraceLog::UpdateETWCategoryGroupEnabledFlags() { 1720 AutoLock lock(lock_); 1721 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); 1722 // Go through each category and set/clear the ETW bit depending on whether the 1723 // category is enabled. 1724 for (size_t i = 0; i < category_index; i++) { 1725 const char* category_group = g_category_groups[i]; 1726 DCHECK(category_group); 1727 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( 1728 category_group)) { 1729 g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT; 1730 } else { 1731 g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT; 1732 } 1733 } 1734 } 1735 #endif // defined(OS_WIN) 1736 1737 void ConvertableToTraceFormat::EstimateTraceMemoryOverhead( 1738 TraceEventMemoryOverhead* overhead) { 1739 overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this)); 1740 } 1741 1742 void TraceLog::AddAsyncEnabledStateObserver( 1743 WeakPtr<AsyncEnabledStateObserver> listener) { 1744 AutoLock lock(lock_); 1745 async_observers_.insert( 1746 std::make_pair(listener.get(), RegisteredAsyncObserver(listener))); 1747 } 1748 1749 void TraceLog::RemoveAsyncEnabledStateObserver( 1750 AsyncEnabledStateObserver* listener) { 1751 AutoLock lock(lock_); 1752 async_observers_.erase(listener); 1753 } 1754 1755 bool TraceLog::HasAsyncEnabledStateObserver( 1756 AsyncEnabledStateObserver* listener) const { 1757 AutoLock lock(lock_); 1758 return ContainsKey(async_observers_, listener); 1759 } 1760 1761 } // namespace trace_event 1762 } // namespace base 1763 1764 namespace trace_event_internal { 1765 1766 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( 1767 const char* category_group, 1768 const char* name) { 1769 // The single atom works because for now the category_group can only be "gpu". 1770 DCHECK_EQ(strcmp(category_group, "gpu"), 0); 1771 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; 1772 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( 1773 category_group, atomic, category_group_enabled_); 1774 name_ = name; 1775 if (*category_group_enabled_) { 1776 event_handle_ = 1777 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP( 1778 TRACE_EVENT_PHASE_COMPLETE, 1779 category_group_enabled_, 1780 name, 1781 trace_event_internal::kGlobalScope, // scope 1782 trace_event_internal::kNoId, // id 1783 static_cast<int>(base::PlatformThread::CurrentId()), // thread_id 1784 base::TimeTicks::Now(), 1785 trace_event_internal::kZeroNumArgs, 1786 nullptr, 1787 nullptr, 1788 nullptr, 1789 nullptr, 1790 TRACE_EVENT_FLAG_NONE); 1791 } 1792 } 1793 1794 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { 1795 if (*category_group_enabled_) { 1796 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, 1797 event_handle_); 1798 } 1799 } 1800 1801 } // namespace trace_event_internal 1802