// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_log.h"

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <memory>
#include <sstream>
#include <unordered_set>
#include <utility>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_info.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
// post_task.h pulls in a lot of code not needed on Arc++.
#if 0
#include "base/task_scheduler/post_task.h"
#endif
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif

namespace base {
namespace internal {

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<trace_event::TraceLog,
              LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
  }
};

}  // namespace internal

namespace trace_event {

namespace {

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;

const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;

#define MAX_TRACE_EVENT_FILTERS 32

// List of TraceEventFilter objects from the most recent tracing session.
std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
  static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
  return *filters;
}

ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
}

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name,
                             const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(
      thread_id,
      TimeTicks(),
      ThreadTicks(),
      TRACE_EVENT_PHASE_METADATA,
      CategoryRegistry::kCategoryMetadata->state_ptr(),
      metadata_name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,  // id
      trace_event_internal::kNoId,  // bind_id
      num_args,
      &arg_name,
      &arg_type,
      &arg_value,
      nullptr,
      TRACE_EVENT_FLAG_NONE);
}

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
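
// A minimal usage sketch of AutoThreadLocalBoolean (mirroring its use in
// AddTraceEventWithThreadIdAndTimestamp below, shown here for illustration
// only): the boolean stays set for the lifetime of the scope, so re-entrant
// calls on the same thread can bail out early.
//
//   if (thread_is_in_trace_event_.Get())
//     return handle;  // Already inside a trace event call on this thread.
//   AutoThreadLocalBoolean thread_is_in_trace_event(
//       &thread_is_in_trace_event_);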

// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32_t chunk_seq,
                size_t chunk_index,
                size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
  DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16_t>(chunk_index);
  handle->event_index = static_cast<uint16_t>(event_index);
}
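
// ForEachCategoryFilter (below) walks the bitmap of filters enabled for a
// category. For example (illustrative values only), a |filter_bitmap| of
// 0b101 would invoke the filters stored at indices 0 and 2 of
// GetCategoryGroupFilters().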

template <typename Function>
void ForEachCategoryFilter(const unsigned char* category_group_enabled,
                           Function filter_fn) {
  const TraceCategory* category =
      CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
  uint32_t filter_bitmap = category->enabled_filters();
  for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
    if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
      filter_fn(GetCategoryGroupFilters()[index].get());
  }
}

}  // namespace

// A helper class that allows the lock to be acquired in the middle of the scope
// and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
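
// A minimal usage sketch of OptionalAutoLock (mirroring its use in
// AddTraceEventWithThreadIdAndTimestamp below): the lock is acquired only on
// the slow path and is released automatically when the scope ends.
//
//   OptionalAutoLock lock(&lock_);
//   if (thread_local_event_buffer) {
//     trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
//   } else {
//     lock.EnsureAcquired();  // Slow path: fall back to the shared chunk.
//     trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
//   }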

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver,
      public MemoryDumpProvider {
 public:
  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_) {
      return nullptr;
    }

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  std::unique_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                    ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name = StringPrintf(
      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

struct TraceLog::RegisteredAsyncObserver {
  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
  ~RegisteredAsyncObserver() {}

  WeakPtr<AsyncEnabledStateObserver> observer;
  scoped_refptr<SequencedTaskRunner> task_runner;
};

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() {}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
}

TraceLog::TraceLog()
    : enabled_modes_(0),
      num_traces_recorded_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      trace_options_(kInternalRecordUntilFull),
      trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(0),
      use_worker_thread_(false),
      filter_factory_for_testing_(nullptr) {
  CategoryRegistry::Initialize();

#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
                                                         nullptr);
}

TraceLog::~TraceLog() {}

void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or one whose message loop may be
  // blocked, trace events are added directly into the main buffer.
  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
    return;
  HEAP_PROFILER_SCOPED_IGNORE;
  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = NULL;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}

bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add("TraceLog", sizeof(*this));
  {
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);

    for (auto& metadata_event : metadata_events_)
      metadata_event->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
  }
  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
  if (!category) {
    // Slow path: in the case of a new category we have to repeat the check
    // holding the lock, as multiple threads might have reached this point
    // at the same time.
    auto category_initializer = [](TraceCategory* category) {
      TraceLog::GetInstance()->UpdateCategoryState(category);
    };
    AutoLock lock(tracelog->lock_);
    CategoryRegistry::GetOrCreateCategoryLocked(
        category_group, category_initializer, &category);
  }
  DCHECK(category->state_ptr());
  return category->state_ptr();
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
      ->name();
}

void TraceLog::UpdateCategoryState(TraceCategory* category) {
  lock_.AssertAcquired();
  DCHECK(category->is_valid());
  unsigned char state_flags = 0;
  if (enabled_modes_ & RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

  // TODO(primiano): this is a temporary workaround for catapult:#2341,
  // to guarantee that metadata events are always added even if the category
  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
  if (enabled_modes_ & RECORDING_MODE &&
      category == CategoryRegistry::kCategoryMetadata) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
  }
#endif

  uint32_t enabled_filters_bitmap = 0;
  int index = 0;
  for (const auto& event_filter : enabled_event_filters_) {
    if (event_filter.IsCategoryGroupEnabled(category->name())) {
      state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
      DCHECK(GetCategoryGroupFilters()[index]);
      enabled_filters_bitmap |= 1 << index;
    }
    if (index++ >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED();
      break;
    }
  }
  category->set_enabled_filters(enabled_filters_bitmap);
  category->set_state(state_flags);
}

void TraceLog::UpdateCategoryRegistry() {
  lock_.AssertAcquired();
  CreateFiltersForTraceConfig();
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    UpdateCategoryState(&category);
  }
}

void TraceLog::CreateFiltersForTraceConfig() {
  if (!(enabled_modes_ & FILTERING_MODE))
    return;

  // The filters may already have been created, and tracing may already be
  // enabled. The filter list cannot be changed while trace events are using
  // it.
  if (GetCategoryGroupFilters().size())
    return;

  for (auto& filter_config : enabled_event_filters_) {
    if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED()
          << "Too many trace event filters installed in the current session";
      break;
    }

    std::unique_ptr<TraceEventFilter> new_filter;
    const std::string& predicate_name = filter_config.predicate_name();
    if (predicate_name == EventNameFilter::kName) {
      auto whitelist = MakeUnique<std::unordered_set<std::string>>();
      CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
      new_filter = MakeUnique<EventNameFilter>(std::move(whitelist));
    } else if (predicate_name == HeapProfilerEventFilter::kName) {
      new_filter = MakeUnique<HeapProfilerEventFilter>();
    } else {
      if (filter_factory_for_testing_)
        new_filter = filter_factory_for_testing_(predicate_name);
      CHECK(new_filter) << "Unknown trace filter " << predicate_name;
    }
    GetCategoryGroupFilters().push_back(std::move(new_filter));
  }
}
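
// Parses the synthetic delay descriptions in the trace config. Judging from
// the tokenizer below, each entry has the form
// "<delay name>;<token>;<token>;...", where a numeric token sets the target
// duration in seconds and the tokens "static", "oneshot" and "alternating"
// select the delay mode; e.g. (hypothetical value)
// "cc.BeginMainFrame;0.016;alternating".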
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
  ResetTraceEventSyntheticDelays();
  const TraceConfig::StringList& delays =
      trace_config_.GetSyntheticDelayValues();
  TraceConfig::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64_t>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  for (const auto& category : CategoryRegistry::GetAllCategories()) {
    if (!CategoryRegistry::IsBuiltinCategory(&category))
      category_groups->push_back(category.name());
  }
}

void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          uint8_t modes_to_enable) {
  std::vector<EnabledStateObserver*> observer_list;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_task_runner_);

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceConfig(trace_config);

    InternalTraceOptions old_options = trace_options();

    if (dispatching_to_observer_list_) {
      // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    // Clear all filters from the previous tracing session. These filters are
    // not cleared at the end of tracing, because some threads that hit a
    // trace event while tracing is being disabled could still try to use
    // them.
    if (!enabled_modes_)
      GetCategoryGroupFilters().clear();

    // Update trace config for recording.
    const bool already_recording = enabled_modes_ & RECORDING_MODE;
    if (modes_to_enable & RECORDING_MODE) {
      if (already_recording) {
        // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
        // enabled crbug.com/625170.
        DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
                                               "tracing with a different set "
                                               "of options.";
        trace_config_.Merge(trace_config);
      } else {
        trace_config_ = trace_config;
      }
    }

    // Update event filters.
    if (modes_to_enable & FILTERING_MODE) {
      DCHECK(!trace_config.event_filters().empty())
          << "Attempting to enable filtering without any filters";
      DCHECK(enabled_event_filters_.empty()) << "Attempting to re-enable "
                                                "filtering when filters are "
                                                "already enabled.";

      // Use the given event filters only if filtering was not enabled.
      if (enabled_event_filters_.empty())
        enabled_event_filters_ = trace_config.event_filters();
    }
    // Keep the |trace_config_| updated with only enabled filters in case anyone
    // tries to read it using |GetCurrentTraceConfig| (even if filters are
    // empty).
    trace_config_.SetEventFilters(enabled_event_filters_);

    enabled_modes_ |= modes_to_enable;
    UpdateCategoryRegistry();

    // Do not notify observers or create trace buffer if only enabled for
    // filtering or if recording was already enabled.
    if (!(modes_to_enable & RECORDING_MODE) || already_recording)
      return;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    UpdateCategoryRegistry();
    UpdateSyntheticDelaysFromTraceConfig();

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
    observer_map = async_observers_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (EnabledStateObserver* observer : observer_list)
    observer->OnTraceLogEnabled();
  for (const auto& it : observer_map) {
    it.second.task_runner->PostTask(
        FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                        it.second.observer));
  }

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  DCHECK(argument_filter_predicate_.is_null());
  argument_filter_predicate_ = argument_filter_predicate;
}

TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
    const TraceConfig& config) {
  InternalTraceOptions ret = config.IsArgumentFilterEnabled()
                                 ? kInternalEnableArgumentFilter
                                 : kInternalNone;
  switch (config.GetTraceRecordMode()) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}

TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(RECORDING_MODE);
}

void TraceLog::SetDisabled(uint8_t modes_to_disable) {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(modes_to_disable);
}

void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
  lock_.AssertAcquired();

  if (!(enabled_modes_ & modes_to_disable))
    return;

  if (dispatching_to_observer_list_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  bool is_recording_mode_disabled =
      (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
  enabled_modes_ &= ~modes_to_disable;

  if (modes_to_disable & FILTERING_MODE)
    enabled_event_filters_.clear();

  if (modes_to_disable & RECORDING_MODE)
    trace_config_.Clear();

  UpdateCategoryRegistry();

  // Add metadata events and notify observers only if recording mode has just
  // been disabled.
  if (!is_recording_mode_disabled)
    return;

  AddMetadataEventsWhileLocked();

  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;
  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
      async_observers_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (EnabledStateObserver* observer : observer_list)
      observer->OnTraceLogDisabled();
    for (const auto& it : observer_map) {
      it.second.task_runner->PostTask(
          FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                          it.second.observer));
    }
  }
  dispatching_to_observer_list_ = false;
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  AutoLock lock(lock_);
  return ContainsValue(enabled_state_observer_list_, listener);
}

TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
  result.event_count = static_cast<uint32_t>(logged_events_->Size());
  return result;
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                std::move(thread_shared_chunk_));
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked(RECORDING_MODE);
  }
}

// Flush() works as follows:
// 1. Flush() is called on thread A, whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread local buffers; otherwise finish the
//    flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
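
// A minimal caller-side sketch (hypothetical, for illustration only): tracing
// must be disabled before flushing, and the callback may run more than once
// when the serialized events are delivered in chunks.
//
//   TraceLog* trace_log = TraceLog::GetInstance();
//   trace_log->SetDisabled();
//   trace_log->Flush(
//       Bind([](const scoped_refptr<RefCountedString>& json_chunk,
//               bool has_more_events) {
//         // Consume |json_chunk|; more calls follow while
//         // |has_more_events| is true.
//       }));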

void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}

void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int gen = generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>>
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
                             ? ThreadTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK(thread_message_loops_.empty() || flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  std::move(thread_shared_chunk_));
    }

    for (MessageLoop* loop : thread_message_loops_)
      thread_message_loop_task_runners.push_back(loop->task_runner());
  }

  if (!thread_message_loop_task_runners.empty()) {
    for (auto& task_runner : thread_message_loop_task_runners) {
      task_runner->PostTask(
          FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
                          gen, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                        discard_events),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(gen, discard_events);
}

// Usually it runs on a different thread.
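// The output contract, as implemented below: |flush_output_callback| is
// invoked with has_more_events=true each time the accumulated JSON exceeds
// kTraceEventBufferSizeInBytes, and exactly once more with
// has_more_events=false at the end, even when there are no events.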
void TraceLog::ConvertTraceEventsToTraceFormat(
    std::unique_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  HEAP_PROFILER_SCOPED_IGNORE;
  // The callback needs to be called at least once, even if there are no
  // events, to let the caller know that the flush completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}

void TraceLog::FinishFlush(int generation, bool discard_events) {
  std::unique_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_task_runner_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_) {
#if 0
    base::PostTaskWithTraits(
        FROM_HERE, base::TaskTraits()
                       .MayBlock()
                       .WithPriority(base::TaskPriority::BACKGROUND)
                       .WithShutdownBehavior(
                           base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
        Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
             Passed(&previous_logged_events), flush_output_callback,
             argument_filter_predicate));
    return;
#else
    NOTREACHED();
#endif
  }

  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
                                  flush_output_callback,
                                  argument_filter_predicate);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_task_runner_ ||
      !thread_message_loops_.empty())
    return;

  flush_task_runner_->PostTask(
      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
                      discard_events));
}

void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished their flush in time. "
           "If this happens consistently for a thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "that thread to avoid losing its trace events.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
    }
  }
  FinishFlush(generation, discard_events);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
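
// Bumping |generation_| above invalidates all outstanding
// ThreadLocalEventBuffers: their CheckGeneration() comparison fails, so their
// chunks are dropped rather than returned to the new buffer (see
// FlushWhileLocked()).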

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}

TraceEventHandle TraceLog::AddTraceEventWithBindId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    unsigned long long bind_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      bind_id,
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}

TraceEventHandle TraceLog::AddTraceEventWithProcessId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int process_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  base::TimeTicks now = base::TimeTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      process_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
}

// Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
// with kNoId as bind_id
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      scope,
      id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      timestamp,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    unsigned long long id,
    unsigned long long bind_id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  TraceEventHandle handle = {0, 0, 0};
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);
  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
      bind_id = MangleEventId(bind_id);
    id = MangleEventId(id);
  }

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  ThreadTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
  if (*category_group_enabled & RECORDING_MODE) {
    // |thread_local_event_buffer_| can be null if the current thread doesn't
    // have a message loop or the message loop is blocked.
    InitializeThreadLocalEventBufferIfSupported();
    thread_local_event_buffer = thread_local_event_buffer_.Get();
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    static auto* current_thread_name = new ThreadLocalPointer<const char>();
    if (new_name != current_thread_name->Get() && new_name && *new_name) {
      current_thread_name->Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        bool found = std::find(existing_names.begin(), existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (!existing_names.empty())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  std::string console_message;
  std::unique_ptr<TraceEvent> filtered_trace_event;
  bool disabled_by_filters = false;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                                phase, category_group_enabled, name, scope, id,
                                bind_id, num_args, arg_names, arg_types,
                                arg_values, convertable_values, flags);

    disabled_by_filters = true;
    ForEachCategoryFilter(
        category_group_enabled, [&new_trace_event, &disabled_by_filters](
                                    TraceEventFilter* trace_event_filter) {
          if (trace_event_filter->FilterTraceEvent(*new_trace_event))
            disabled_by_filters = false;
        });
    if (!disabled_by_filters)
      filtered_trace_event = std::move(new_trace_event);
  }

  // If enabled for recording, the event should be added only if one of the
  // filters accepts it, or the category is not enabled for filtering.
  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
      !disabled_by_filters) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      if (filtered_trace_event) {
        trace_event->MoveFrom(std::move(filtered_trace_event));
      } else {
        trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                                phase, category_group_enabled, name, scope, id,
                                bind_id, num_args, arg_names, arg_types,
                                arg_values, convertable_values, flags);
      }

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  return handle;
}

void TraceLog::AddMetadataEvent(
    const unsigned char* category_group_enabled,
    const char* name,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  HEAP_PROFILER_SCOPED_IGNORE;
  std::unique_ptr<TraceEvent> trace_event(new TraceEvent);
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  ThreadTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();
  AutoLock lock(lock_);
  trace_event->Initialize(
      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
      category_group_enabled, name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,         // id
      trace_event_internal::kNoId,         // bind_id
      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
  metadata_events_.push_back(std::move(trace_event));
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  HEAP_PROFILER_SCOPED_IGNORE;
  AutoLock thread_info_lock(thread_info_lock_);
   1363 
   1364   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
   1365   // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
   1366   DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
   1367 
   1368   TimeDelta duration;
   1369   int thread_id =
   1370       trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
   1371   if (phase == TRACE_EVENT_PHASE_END) {
   1372     duration = timestamp - thread_event_start_times_[thread_id].top();
   1373     thread_event_start_times_[thread_id].pop();
   1374   }
   1375 
   1376   std::string thread_name = thread_names_[thread_id];
   1377   if (thread_colors_.find(thread_name) == thread_colors_.end())
   1378     thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
   1379 
   1380   std::ostringstream log;
   1381   log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
   1382                             thread_colors_[thread_name]);
   1383 
   1384   size_t depth = 0;
   1385   auto it = thread_event_start_times_.find(thread_id);
   1386   if (it != thread_event_start_times_.end())
   1387     depth = it->second.size();
   1388 
   1389   for (size_t i = 0; i < depth; ++i)
   1390     log << "| ";
   1391 
   1392   if (trace_event)
   1393     trace_event->AppendPrettyPrinted(&log);
   1394   if (phase == TRACE_EVENT_PHASE_END)
   1395     log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
   1396 
   1397   log << "\x1b[0;m";
   1398 
   1399   if (phase == TRACE_EVENT_PHASE_BEGIN)
   1400     thread_event_start_times_[thread_id].push(timestamp);
   1401 
   1402   return log.str();
   1403 }
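
        // Illustrative output (hypothetical values): for an END event on thread
        // "CrBrowserMain" assigned color 2, with one enclosing BEGIN still open,
        // the returned string looks roughly like
        //   CrBrowserMain: \x1b[0;32m| MyEvent (1.234 ms)\x1b[0;m
        // i.e. one ANSI color per thread, one "| " per open nesting level, and
        // the duration appended for END events.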
   1404 
   1405 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
   1406                                 const char* name,
   1407                                 TraceEventHandle handle) {
   1408   const char* category_name = GetCategoryGroupName(category_group_enabled);
   1409   ForEachCategoryFilter(
   1410       category_group_enabled,
   1411       [name, category_name](TraceEventFilter* trace_event_filter) {
   1412         trace_event_filter->EndEvent(category_name, name);
   1413       });
   1414 }
   1415 
   1416 void TraceLog::UpdateTraceEventDuration(
   1417     const unsigned char* category_group_enabled,
   1418     const char* name,
   1419     TraceEventHandle handle) {
   1420   char category_group_enabled_local = *category_group_enabled;
   1421   if (!category_group_enabled_local)
   1422     return;
   1423 
   1424   // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
   1425   // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
   1426   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
   1427   if (thread_is_in_trace_event_.Get())
   1428     return;
   1429 
   1430   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
   1431 
   1432   ThreadTicks thread_now = ThreadNow();
   1433   TimeTicks now = OffsetNow();
   1434 
   1435 #if defined(OS_WIN)
   1436   // Generate an ETW event that marks the end of a complete event.
   1437   if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
   1438     TraceEventETWExport::AddCompleteEndEvent(name);
   1439 #endif  // OS_WIN
   1440 
   1441   std::string console_message;
   1442   if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
   1443     OptionalAutoLock lock(&lock_);
   1444 
   1445     TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
   1446     if (trace_event) {
   1447       DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
   1448       // TEMP(oysteine) to debug crbug.com/638744
   1449       if (trace_event->duration().ToInternalValue() != -1) {
   1450         DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
   1451                  << ", chunk_index " << handle.chunk_index << ", event_index "
   1452                  << handle.event_index;
   1453 
   1454         std::string serialized_event;
   1455         trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
   1456         DVLOG(1) << "TraceEvent: " << serialized_event;
   1457         lock_.AssertAcquired();
   1458       }
   1459 
   1460       trace_event->UpdateDuration(now, thread_now);
   1461 #if defined(OS_ANDROID)
   1462       trace_event->SendToATrace();
   1463 #endif
   1464     }
   1465 
   1466     if (trace_options() & kInternalEchoToConsole) {
   1467       console_message =
   1468           EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
   1469     }
   1470   }
   1471 
   1472   if (!console_message.empty())
   1473     LOG(ERROR) << console_message;
   1474 
   1475   if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
   1476     EndFilteredEvent(category_group_enabled, name, handle);
   1477 }
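
        // Context sketch: this is the second half of a COMPLETE event's life.
        // A scoped macro such as TRACE_EVENT0("category", "name") adds the event
        // with phase TRACE_EVENT_PHASE_COMPLETE and, when the scope closes,
        // routes back through TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION to
        // fill in the duration here.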
   1478 
   1479 uint64_t TraceLog::MangleEventId(uint64_t id) {
   1480   return id ^ process_id_hash_;
   1481 }
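
        // A brief sketch (illustrative id value): |process_id_hash_| is an FNV
        // hash of the process id, set in SetProcessID() below, so numerically
        // equal ids (often raw pointers) coming from different processes are
        // unlikely to collide once mangled. Since XOR is self-inverse, mangling
        // twice within one process restores the original id:
        //   uint64_t mangled = TraceLog::GetInstance()->MangleEventId(0x1234u);
        //   DCHECK_EQ(0x1234u, TraceLog::GetInstance()->MangleEventId(mangled));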
   1482 
   1483 void TraceLog::AddMetadataEventsWhileLocked() {
   1484   lock_.AssertAcquired();
   1485 
   1486   // Move metadata added by |AddMetadataEvent| into the trace log.
   1487   while (!metadata_events_.empty()) {
   1488     TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
   1489     event->MoveFrom(std::move(metadata_events_.back()));
   1490     metadata_events_.pop_back();
   1491   }
   1492 
   1493 #if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
   1494   InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1495                           0, "num_cpus", "number",
   1496                           base::SysInfo::NumberOfProcessors());
   1497 #endif
   1498 
   1499   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   1500   if (process_sort_index_ != 0) {
   1501     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1502                             current_thread_id, "process_sort_index",
   1503                             "sort_index", process_sort_index_);
   1504   }
   1505 
   1506   if (!process_name_.empty()) {
   1507     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1508                             current_thread_id, "process_name", "name",
   1509                             process_name_);
   1510   }
   1511 
   1512 #if !defined(OS_NACL) && !defined(OS_IOS)
   1513   Time process_creation_time = CurrentProcessInfo::CreationTime();
   1514   if (!process_creation_time.is_null()) {
   1515     TimeDelta process_uptime = Time::Now() - process_creation_time;
   1516     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1517                             current_thread_id, "process_uptime_seconds",
   1518                             "uptime", process_uptime.InSeconds());
   1519   }
   1520 #endif  // !defined(OS_NACL) && !defined(OS_IOS)
   1521 
   1522   if (!process_labels_.empty()) {
   1523     std::vector<base::StringPiece> labels;
   1524     for (const auto& it : process_labels_)
   1525       labels.push_back(it.second);
   1526     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1527                             current_thread_id, "process_labels", "labels",
   1528                             base::JoinString(labels, ","));
   1529   }
   1530 
   1531   // Thread sort indices.
   1532   for (const auto& it : thread_sort_indices_) {
   1533     if (it.second == 0)
   1534       continue;
   1535     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1536                             it.first, "thread_sort_index", "sort_index",
   1537                             it.second);
   1538   }
   1539 
   1540   // Thread names.
   1541   AutoLock thread_info_lock(thread_info_lock_);
   1542   for (const auto& it : thread_names_) {
   1543     if (it.second.empty())
   1544       continue;
   1545     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1546                             it.first, "thread_name", "name", it.second);
   1547   }
   1548 
   1549   // If the buffer is full, add a metadata record to report this.
   1550   if (!buffer_limit_reached_timestamp_.is_null()) {
   1551     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
   1552                             current_thread_id, "trace_buffer_overflowed",
   1553                             "overflowed_at_ts",
   1554                             buffer_limit_reached_timestamp_);
   1555   }
   1556 }
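
        // For reference, a sketch (field values hypothetical) of how these
        // records surface once the trace is serialized: TRACE_EVENT_PHASE_METADATA
        // maps to phase "M" in the JSON output, so each entry becomes roughly
        //   {"ph":"M","pid":1234,"tid":5678,"name":"process_name",
        //    "args":{"name":"Browser"}}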
   1557 
   1558 void TraceLog::DeleteForTesting() {
   1559   internal::DeleteTraceLogForTesting::Delete();
   1560   CategoryRegistry::ResetForTesting();
   1561 }
   1562 
   1563 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
   1564   return GetEventByHandleInternal(handle, NULL);
   1565 }
   1566 
   1567 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
   1568                                                OptionalAutoLock* lock) {
   1569   if (!handle.chunk_seq)
   1570     return NULL;
   1571 
   1572   DCHECK(handle.chunk_seq);
   1573   DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
   1574   DCHECK(handle.event_index < TraceBufferChunk::kTraceBufferChunkSize);
   1575 
   1576   if (thread_local_event_buffer_.Get()) {
   1577     TraceEvent* trace_event =
   1578         thread_local_event_buffer_.Get()->GetEventByHandle(handle);
   1579     if (trace_event)
   1580       return trace_event;
   1581   }
   1582 
   1583   // The event is no longer under the control of the thread local buffer.
   1584   // Try to get the event from the main buffer with a lock.
   1585   if (lock)
   1586     lock->EnsureAcquired();
   1587 
   1588   if (thread_shared_chunk_ &&
   1589       handle.chunk_index == thread_shared_chunk_index_) {
   1590     return handle.chunk_seq == thread_shared_chunk_->seq()
   1591                ? thread_shared_chunk_->GetEventAt(handle.event_index)
   1592                : NULL;
   1593   }
   1594 
   1595   return logged_events_->GetEventByHandle(handle);
   1596 }
   1597 
   1598 void TraceLog::SetProcessID(int process_id) {
   1599   process_id_ = process_id;
   1600   // Create an FNV hash from the process ID for XORing.
   1601   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
   1602   const unsigned long long kOffsetBasis = 14695981039346656037ull;
   1603   const unsigned long long kFnvPrime = 1099511628211ull;
   1604   const unsigned long long pid = static_cast<unsigned long long>(process_id_);
   1605   process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
   1606 }
   1607 
   1608 void TraceLog::SetProcessSortIndex(int sort_index) {
   1609   AutoLock lock(lock_);
   1610   process_sort_index_ = sort_index;
   1611 }
   1612 
   1613 void TraceLog::SetProcessName(const char* process_name) {
   1614   AutoLock lock(lock_);
   1615   process_name_ = process_name;
   1616 }
   1617 
   1618 void TraceLog::UpdateProcessLabel(int label_id,
   1619                                   const std::string& current_label) {
   1620   if (!current_label.length())
   1621     return RemoveProcessLabel(label_id);
   1622 
   1623   AutoLock lock(lock_);
   1624   process_labels_[label_id] = current_label;
   1625 }
   1626 
   1627 void TraceLog::RemoveProcessLabel(int label_id) {
   1628   AutoLock lock(lock_);
   1629   process_labels_.erase(label_id);
   1630 }
   1631 
   1632 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
   1633   AutoLock lock(lock_);
   1634   thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
   1635 }
   1636 
   1637 void TraceLog::SetTimeOffset(TimeDelta offset) {
   1638   time_offset_ = offset;
   1639 }
   1640 
   1641 size_t TraceLog::GetObserverCountForTest() const {
   1642   return enabled_state_observer_list_.size();
   1643 }
   1644 
   1645 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
   1646   thread_blocks_message_loop_.Set(true);
   1647   // This will flush the thread local buffer.
   1648   delete thread_local_event_buffer_.Get();
   1649 }
   1650 
   1651 TraceBuffer* TraceLog::CreateTraceBuffer() {
   1652   HEAP_PROFILER_SCOPED_IGNORE;
   1653   InternalTraceOptions options = trace_options();
   1654   if (options & kInternalRecordContinuously) {
   1655     return TraceBuffer::CreateTraceBufferRingBuffer(
   1656         kTraceEventRingBufferChunks);
   1657   }
   1658   if (options & kInternalEchoToConsole) {
   1659     return TraceBuffer::CreateTraceBufferRingBuffer(
   1660         kEchoToConsoleTraceEventBufferChunks);
   1661   }
   1662   if (options & kInternalRecordAsMuchAsPossible) {
   1663     return TraceBuffer::CreateTraceBufferVectorOfSize(
   1664         kTraceEventVectorBigBufferChunks);
   1665   }
   1666   return TraceBuffer::CreateTraceBufferVectorOfSize(
   1667       kTraceEventVectorBufferChunks);
   1668 }
   1669 
   1670 #if defined(OS_WIN)
   1671 void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
   1672   // Go through each category and set/clear the ETW bit depending on whether the
   1673   // category is enabled.
   1674   for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
   1675     if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
   1676             category.name())) {
   1677       category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
   1678     } else {
   1679       category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
   1680     }
   1681   }
   1682 }
   1683 #endif  // defined(OS_WIN)
   1684 
   1685 void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
   1686     TraceEventMemoryOverhead* overhead) {
   1687   overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
   1688 }
   1689 
   1690 void TraceLog::AddAsyncEnabledStateObserver(
   1691     WeakPtr<AsyncEnabledStateObserver> listener) {
   1692   AutoLock lock(lock_);
   1693   async_observers_.insert(
   1694       std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
   1695 }
   1696 
   1697 void TraceLog::RemoveAsyncEnabledStateObserver(
   1698     AsyncEnabledStateObserver* listener) {
   1699   AutoLock lock(lock_);
   1700   async_observers_.erase(listener);
   1701 }
   1702 
   1703 bool TraceLog::HasAsyncEnabledStateObserver(
   1704     AsyncEnabledStateObserver* listener) const {
   1705   AutoLock lock(lock_);
   1706   return ContainsKey(async_observers_, listener);
   1707 }
   1708 
   1709 }  // namespace trace_event
   1710 }  // namespace base
   1711 
   1712 namespace trace_event_internal {
   1713 
   1714 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
   1715     const char* category_group,
   1716     const char* name) {
   1717   // The single atom works because, for now, category_group can only be "gpu".
   1718   DCHECK_EQ(strcmp(category_group, "gpu"), 0);
   1719   static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
   1720   INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
   1721       category_group, atomic, category_group_enabled_);
   1722   name_ = name;
   1723   if (*category_group_enabled_) {
   1724     event_handle_ =
   1725         TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
   1726             TRACE_EVENT_PHASE_COMPLETE,
   1727             category_group_enabled_,
   1728             name,
   1729             trace_event_internal::kGlobalScope,  // scope
   1730             trace_event_internal::kNoId,  // id
   1731             static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
   1732             base::TimeTicks::Now(),
   1733             trace_event_internal::kZeroNumArgs,
   1734             nullptr,
   1735             nullptr,
   1736             nullptr,
   1737             nullptr,
   1738             TRACE_EVENT_FLAG_NONE);
   1739   }
   1740 }
   1741 
   1742 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   1743   if (*category_group_enabled_) {
   1744     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
   1745                                                 event_handle_);
   1746   }
   1747 }
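
        // Usage sketch: instances are meant to live on the stack so that the
        // constructor/destructor pair brackets a single "gpu" COMPLETE event
        // (see the TRACE_EVENT_BINARY_EFFICIENT0 macro in trace_event.h):
        //   {
        //     trace_event_internal::ScopedTraceBinaryEfficient trace("gpu", "Draw");
        //     DrawFrame();  // hypothetical work being measured
        //   }  // ~ScopedTraceBinaryEfficient() records the duration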
   1748 
   1749 }  // namespace trace_event_internal
   1750