// base/debug/trace_event_impl.cc (code-search navigation header removed)
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/debug/trace_event_impl.h"
      6 
      7 #include <algorithm>
      8 
      9 #include "base/base_switches.h"
     10 #include "base/bind.h"
     11 #include "base/command_line.h"
     12 #include "base/debug/leak_annotations.h"
     13 #include "base/debug/trace_event.h"
     14 #include "base/format_macros.h"
     15 #include "base/lazy_instance.h"
     16 #include "base/memory/singleton.h"
     17 #include "base/process/process_metrics.h"
     18 #include "base/stl_util.h"
     19 #include "base/strings/string_split.h"
     20 #include "base/strings/string_tokenizer.h"
     21 #include "base/strings/string_util.h"
     22 #include "base/strings/stringprintf.h"
     23 #include "base/strings/utf_string_conversions.h"
     24 #include "base/synchronization/cancellation_flag.h"
     25 #include "base/synchronization/waitable_event.h"
     26 #include "base/sys_info.h"
     27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
     28 #include "base/threading/platform_thread.h"
     29 #include "base/threading/thread_id_name_manager.h"
     30 #include "base/threading/thread_local.h"
     31 #include "base/time/time.h"
     32 
     33 #if defined(OS_WIN)
     34 #include "base/debug/trace_event_win.h"
     35 #endif
     36 
// Test-only helper that tears down the leaky TraceLog singleton so a test
// can start over with a fresh instance. Lives outside namespace base::debug
// so the Singleton<> machinery can be invoked by its public name.
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    // OnExit(0) runs the destruction path that the leaky singleton traits
    // would normally skip at process shutdown.
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};
     44 
     45 // The thread buckets for the sampling profiler.
     46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
     47 
     48 namespace base {
     49 namespace debug {
     50 
     51 // Controls the number of trace events we will buffer in-memory
     52 // before throwing them away.
     53 const size_t kTraceEventBufferSize = 500000;
     54 const size_t kTraceEventBatchSize = 1000;
     55 const size_t kTraceEventInitialBufferSize = 1024;
     56 
     57 #define MAX_CATEGORY_GROUPS 100
     58 
     59 namespace {
     60 
     61 // Parallel arrays g_category_groups and g_category_group_enabled are separate
     62 // so that a pointer to a member of g_category_group_enabled can be easily
     63 // converted to an index into g_category_groups. This allows macros to deal
     64 // only with char enabled pointers from g_category_group_enabled, and we can
     65 // convert internally to determine the category name from the char enabled
     66 // pointer.
     67 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
     68   "tracing already shutdown",
     69   "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
     70   "__metadata",
     71 };
     72 
     73 // The enabled flag is char instead of bool so that the API can be used from C.
     74 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
     75 const int g_category_already_shutdown = 0;
     76 const int g_category_categories_exhausted = 1;
     77 const int g_category_metadata = 2;
     78 const int g_num_builtin_categories = 3;
     79 int g_category_index = g_num_builtin_categories; // Skip default categories.
     80 
     81 // The name of the current thread. This is used to decide if the current
     82 // thread name has changed. We combine all the seen thread names into the
     83 // output name for the thread.
     84 LazyInstance<ThreadLocalPointer<const char> >::Leaky
     85     g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
     86 
     87 const char kRecordUntilFull[] = "record-until-full";
     88 const char kRecordContinuously[] = "record-continuously";
     89 const char kEnableSampling[] = "enable-sampling";
     90 
     91 size_t NextIndex(size_t index) {
     92   index++;
     93   if (index >= kTraceEventBufferSize)
     94     index = 0;
     95   return index;
     96 }
     97 
     98 }  // namespace
     99 
// TraceBuffer that keeps the most recent kTraceEventBufferSize events in a
// ring: once the backing vector is full, new events overwrite the oldest
// ones, so IsFull() never reports true.
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer()
      : unused_event_index_(0),
        oldest_event_index_(0) {
    logged_events_.reserve(kTraceEventInitialBufferSize);
  }

  virtual ~TraceBufferRingBuffer() {}

  // Stores |event| in the next unused slot. Grows the vector until it holds
  // kTraceEventBufferSize entries, then overwrites in place.
  virtual void AddEvent(const TraceEvent& event) OVERRIDE {
    if (unused_event_index_ < Size())
      logged_events_[unused_event_index_] = event;
    else
      logged_events_.push_back(event);

    unused_event_index_ = NextIndex(unused_event_index_);
    if (unused_event_index_ == oldest_event_index_) {
      // The write cursor caught up with the read cursor: drop the oldest
      // event by advancing the read cursor.
      oldest_event_index_ = NextIndex(oldest_event_index_);
    }
  }

  virtual bool HasMoreEvents() const OVERRIDE {
    return oldest_event_index_ != unused_event_index_;
  }

  // Returns the oldest unread event and advances the read cursor. Only
  // valid while HasMoreEvents() is true.
  virtual const TraceEvent& NextEvent() OVERRIDE {
    DCHECK(HasMoreEvents());

    size_t next = oldest_event_index_;
    oldest_event_index_ = NextIndex(oldest_event_index_);
    return GetEventAt(next);
  }

  // A ring buffer overwrites old events instead of filling up.
  virtual bool IsFull() const OVERRIDE {
    return false;
  }

  // Counts buffered events whose enabled-flag pointer is exactly |category|
  // and whose name equals |event_name|.
  virtual size_t CountEnabledByName(
      const unsigned char* category,
      const std::string& event_name) const OVERRIDE {
    size_t notify_count = 0;
    size_t index = oldest_event_index_;
    while (index != unused_event_index_) {
      const TraceEvent& event = GetEventAt(index);
      if (category == event.category_group_enabled() &&
          strcmp(event_name.c_str(), event.name()) == 0) {
        ++notify_count;
      }
      index = NextIndex(index);
    }
    return notify_count;
  }

  virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    DCHECK(index < logged_events_.size());
    return logged_events_[index];
  }

  virtual size_t Size() const OVERRIDE {
    return logged_events_.size();
  }

 private:
  size_t unused_event_index_;   // Next slot to write.
  size_t oldest_event_index_;   // Next slot to read.
  std::vector<TraceEvent> logged_events_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
    170 
    171 class TraceBufferVector : public TraceBuffer {
    172  public:
    173   TraceBufferVector() : current_iteration_index_(0) {
    174     logged_events_.reserve(kTraceEventInitialBufferSize);
    175   }
    176 
    177   virtual ~TraceBufferVector() {
    178   }
    179 
    180   virtual void AddEvent(const TraceEvent& event) OVERRIDE {
    181     // Note, we have two callers which need to be handled. The first is
    182     // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an
    183     // early exit if full. The second is AddThreadNameMetadataEvents().
    184     // We can not DECHECK(!IsFull()) because we have to add the metadata
    185     // events even if the buffer is full.
    186     logged_events_.push_back(event);
    187   }
    188 
    189   virtual bool HasMoreEvents() const OVERRIDE {
    190     return current_iteration_index_ < Size();
    191   }
    192 
    193   virtual const TraceEvent& NextEvent() OVERRIDE {
    194     DCHECK(HasMoreEvents());
    195     return GetEventAt(current_iteration_index_++);
    196   }
    197 
    198   virtual bool IsFull() const OVERRIDE {
    199     return Size() >= kTraceEventBufferSize;
    200   }
    201 
    202   virtual size_t CountEnabledByName(
    203       const unsigned char* category,
    204       const std::string& event_name) const OVERRIDE {
    205     size_t notify_count = 0;
    206     for (size_t i = 0; i < Size(); i++) {
    207       const TraceEvent& event = GetEventAt(i);
    208       if (category == event.category_group_enabled() &&
    209           strcmp(event_name.c_str(), event.name()) == 0) {
    210         ++notify_count;
    211       }
    212     }
    213     return notify_count;
    214   }
    215 
    216   virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    217     DCHECK(index < logged_events_.size());
    218     return logged_events_[index];
    219   }
    220 
    221   virtual size_t Size() const OVERRIDE {
    222     return logged_events_.size();
    223   }
    224 
    225  private:
    226   size_t current_iteration_index_;
    227   std::vector<TraceEvent> logged_events_;
    228 
    229   DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
    230 };
    231 
    232 class TraceBufferDiscardsEvents : public TraceBuffer {
    233  public:
    234   virtual ~TraceBufferDiscardsEvents() { }
    235 
    236   virtual void AddEvent(const TraceEvent& event) OVERRIDE {}
    237   virtual bool HasMoreEvents() const OVERRIDE { return false; }
    238 
    239   virtual const TraceEvent& NextEvent() OVERRIDE {
    240     NOTREACHED();
    241     return *static_cast<TraceEvent*>(NULL);
    242   }
    243 
    244   virtual bool IsFull() const OVERRIDE { return false; }
    245 
    246   virtual size_t CountEnabledByName(
    247       const unsigned char* category,
    248       const std::string& event_name) const OVERRIDE {
    249     return 0;
    250   }
    251 
    252   virtual size_t Size() const OVERRIDE { return 0; }
    253 
    254   virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    255     NOTREACHED();
    256     return *static_cast<TraceEvent*>(NULL);
    257   }
    258 };
    259 
    260 ////////////////////////////////////////////////////////////////////////////////
    261 //
    262 // TraceEvent
    263 //
    264 ////////////////////////////////////////////////////////////////////////////////
    265 
    266 namespace {
    267 
    268 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
    269 
// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
// |end| marks one-past-the-last byte of the storage; the DCHECK verifies
// that the pre-sized buffer was large enough (strlcpy truncates silently).
// NULL members are left untouched.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    // strlcpy returns the length of the source string (excluding the NUL),
    // so |written| is the full byte count including the terminator.
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}
    282 
    283 }  // namespace
    284 
    285 TraceEvent::TraceEvent()
    286     : id_(0u),
    287       category_group_enabled_(NULL),
    288       name_(NULL),
    289       thread_id_(0),
    290       phase_(TRACE_EVENT_PHASE_BEGIN),
    291       flags_(0) {
    292   arg_names_[0] = NULL;
    293   arg_names_[1] = NULL;
    294   memset(arg_values_, 0, sizeof(arg_values_));
    295 }
    296 
// Fully-specified constructor. Captures up to kTraceMaxNumArgs named
// arguments; convertable arguments are adopted (ownership moves from
// |convertable_values|), and when TRACE_EVENT_FLAG_COPY is set the name,
// argument names, and string values are deep-copied into a single
// refcounted allocation so the event outlives its callers' storage.
TraceEvent::TraceEvent(
    int thread_id,
    TimeTicks timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    scoped_ptr<ConvertableToTraceFormat> convertable_values[],
    unsigned char flags)
    : timestamp_(timestamp),
      id_(id),
      category_group_enabled_(category_group_enabled),
      name_(name),
      thread_id_(thread_id),
      phase_(phase),
      flags_(flags) {
  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    // Convertable values are owned; scalar values are stored by bits.
    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i].reset(convertable_values[i].release());
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  // Zero out the unused trailing slots.
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i].reset();
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  // First pass: compute how many bytes of string storage are needed.
  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  // arg_is_copy[i] is only written/read for non-convertable slots below.
  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  // Second pass: copy the strings into one allocation and repoint the
  // members at the copies. The DCHECK verifies the size computed above was
  // exact.
  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
    379 
// "Copy" constructor. NOTE: convertable argument values are transferred,
// not copied — release() is called on |other|'s scoped_ptr slots, so the
// source event loses its convertable values. Callers must not rely on
// |other| retaining them afterwards. All other fields are shallow/value
// copies; parameter_copy_storage_ is refcounted and shared.
TraceEvent::TraceEvent(const TraceEvent& other)
    : timestamp_(other.timestamp_),
      id_(other.id_),
      category_group_enabled_(other.category_group_enabled_),
      name_(other.name_),
      thread_id_(other.thread_id_),
      phase_(other.phase_),
      flags_(other.flags_) {
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_values_[i] = other.arg_values_[i];
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
      // const_cast is needed to steal ownership from the (nominally const)
      // source.
      convertable_values_[i].reset(
          const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
    } else {
      convertable_values_[i].reset();
    }
  }
}
    403 
// Assignment operator. Like the copy constructor, convertable argument
// values are transferred out of |other| via release(), so the source loses
// them. Self-assignment is a no-op.
TraceEvent& TraceEvent::operator=(const TraceEvent& other) {
  if (this == &other)
    return *this;

  timestamp_ = other.timestamp_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  parameter_copy_storage_ = other.parameter_copy_storage_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_values_[i] = other.arg_values_[i];
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
      // Steal ownership of the convertable value from the source.
      convertable_values_[i].reset(
          const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
    } else {
      convertable_values_[i].reset();
    }
  }
  return *this;
}
    431 
// Members (scoped_ptrs, refcounted storage) clean themselves up.
TraceEvent::~TraceEvent() {
}
    434 
    435 // static
    436 void TraceEvent::AppendValueAsJSON(unsigned char type,
    437                                    TraceEvent::TraceValue value,
    438                                    std::string* out) {
    439   std::string::size_type start_pos;
    440   switch (type) {
    441     case TRACE_VALUE_TYPE_BOOL:
    442       *out += value.as_bool ? "true" : "false";
    443       break;
    444     case TRACE_VALUE_TYPE_UINT:
    445       StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
    446       break;
    447     case TRACE_VALUE_TYPE_INT:
    448       StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
    449       break;
    450     case TRACE_VALUE_TYPE_DOUBLE:
    451       StringAppendF(out, "%f", value.as_double);
    452       break;
    453     case TRACE_VALUE_TYPE_POINTER:
    454       // JSON only supports double and int numbers.
    455       // So as not to lose bits from a 64-bit pointer, output as a hex string.
    456       StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
    457                                      reinterpret_cast<intptr_t>(
    458                                      value.as_pointer)));
    459       break;
    460     case TRACE_VALUE_TYPE_STRING:
    461     case TRACE_VALUE_TYPE_COPY_STRING:
    462       *out += "\"";
    463       start_pos = out->size();
    464       *out += value.as_string ? value.as_string : "NULL";
    465       // insert backslash before special characters for proper json format.
    466       while ((start_pos = out->find_first_of("\\\"", start_pos)) !=
    467              std::string::npos) {
    468         out->insert(start_pos, 1, '\\');
    469         // skip inserted escape character and following character.
    470         start_pos += 2;
    471       }
    472       *out += "\"";
    473       break;
    474     default:
    475       NOTREACHED() << "Don't know how to print this value";
    476       break;
    477   }
    478 }
    479 
// Serializes this event as one JSON object ({"cat":...,"pid":...,...}) and
// appends it to |out|. Used when flushing the trace buffer to the JSON
// trace format.
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    // Convertable arguments serialize themselves; everything else goes
    // through the shared scalar/string formatter.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;

      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;

      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
    536 
    537 void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
    538   *out << name_ << "[";
    539   *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
    540   *out << "]";
    541   if (arg_names_[0]) {
    542     *out << ", {";
    543     for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    544       if (i > 0)
    545         *out << ", ";
    546       *out << arg_names_[i] << ":";
    547       std::string value_as_text;
    548 
    549       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    550         convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
    551       else
    552         AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
    553 
    554       *out << value_as_text;
    555     }
    556     *out << "}";
    557   }
    558 }
    559 
    560 ////////////////////////////////////////////////////////////////////////////////
    561 //
    562 // TraceResultBuffer
    563 //
    564 ////////////////////////////////////////////////////////////////////////////////
    565 
// Returns a callback that appends chunks to this object's json_output
// string. Uses Unretained, so |this| must outlive the returned callback.
TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}
    570 
    571 void TraceResultBuffer::SimpleOutput::Append(
    572     const std::string& json_trace_output) {
    573   json_output += json_trace_output;
    574 }
    575 
// No comma is needed before the very first fragment.
TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}
    578 
TraceResultBuffer::~TraceResultBuffer() {
}
    581 
// Installs the sink that receives each JSON chunk produced by
// Start()/AddFragment()/Finish().
void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}
    586 
// Emits the opening bracket of the JSON array and resets the comma state so
// the next fragment is not preceded by a separator.
void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}
    591 
    592 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
    593   if (append_comma_)
    594     output_callback_.Run(",");
    595   append_comma_ = true;
    596   output_callback_.Run(trace_fragment);
    597 }
    598 
// Emits the closing bracket of the JSON array.
void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
    602 
    603 ////////////////////////////////////////////////////////////////////////////////
    604 //
    605 // TraceSamplingThread
    606 //
    607 ////////////////////////////////////////////////////////////////////////////////
    608 class TraceBucketData;
    609 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
    610 
// Bundles one sampling bucket: the atomic word that instrumented code
// publishes its current "category\0name" pointer into, a human-readable
// bucket name, and the callback used to take a sample from it.
class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};
    622 
    623 // This object must be created on the IO thread.
    624 class TraceSamplingThread : public PlatformThread::Delegate {
    625  public:
    626   TraceSamplingThread();
    627   virtual ~TraceSamplingThread();
    628 
    629   // Implementation of PlatformThread::Delegate:
    630   virtual void ThreadMain() OVERRIDE;
    631 
    632   static void DefaultSampleCallback(TraceBucketData* bucekt_data);
    633 
    634   void Stop();
    635   void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);
    636 
    637  private:
    638   friend class TraceLog;
    639 
    640   void GetSamples();
    641   // Not thread-safe. Once the ThreadMain has been called, this can no longer
    642   // be called.
    643   void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
    644                             const char* const name,
    645                             TraceSampleCallback callback);
    646   // Splits a combined "category\0name" into the two component parts.
    647   static void ExtractCategoryAndName(const char* combined,
    648                                      const char** category,
    649                                      const char** name);
    650   std::vector<TraceBucketData> sample_buckets_;
    651   bool thread_running_;
    652   scoped_ptr<CancellationFlag> cancellation_flag_;
    653   scoped_ptr<WaitableEvent> waitable_event_for_testing_;
    654 };
    655 
    656 
// The cancellation flag is created up front so Stop() is safe to call even
// before the thread starts running.
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false) {
  cancellation_flag_.reset(new CancellationFlag);
}
    661 
TraceSamplingThread::~TraceSamplingThread() {
}
    664 
// Sampling-thread entry point: sleeps ~1ms per iteration, samples every
// registered bucket, and exits once Stop() sets the cancellation flag.
void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_->IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    // Lets tests synchronize with individual sampling iterations.
    if (waitable_event_for_testing_.get())
      waitable_event_for_testing_->Signal();
  }
}
    677 
// static
// Atomically reads the "category\0name" pointer published in
// |bucket_data|'s bucket; if one is set, splits it and records a SAMPLE
// phase trace event for that category/name pair.
void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}
    693 
    694 void TraceSamplingThread::GetSamples() {
    695   for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    696     TraceBucketData* bucket_data = &sample_buckets_[i];
    697     bucket_data->callback.Run(bucket_data);
    698   }
    699 }
    700 
// Adds a bucket to be sampled each tick. Must happen before the thread
// starts: the DCHECK enforces the "not after ThreadMain" contract declared
// in the header, since sample_buckets_ is read unlocked by GetSamples().
void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}
    708 
    709 // static
    710 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
    711                                                  const char** category,
    712                                                  const char** name) {
    713   *category = combined;
    714   *name = &combined[strlen(combined) + 1];
    715 }
    716 
// Asks the sampling loop in ThreadMain() to exit after its current
// iteration.
void TraceSamplingThread::Stop() {
  cancellation_flag_->Set();
}
    720 
// Takes ownership of |waitable_event|; ThreadMain() signals it after every
// sampling pass so tests can wait for a full iteration.
void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
    WaitableEvent* waitable_event) {
  waitable_event_for_testing_.reset(waitable_event);
}
    725 
    726 
// |bucket| and |name| are borrowed pointers; callers keep them alive for
// the lifetime of this object.
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}
    734 
TraceBucketData::~TraceBucketData() {
}
    737 
    738 ////////////////////////////////////////////////////////////////////////////////
    739 //
    740 // TraceLog
    741 //
    742 ////////////////////////////////////////////////////////////////////////////////
    743 
// Scoped accumulator for notification bits; |trace_log| must outlive it.
TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
    : trace_log_(trace_log),
      notification_(0) {
}
    748 
TraceLog::NotificationHelper::~NotificationHelper() {
}
    751 
// Accumulates |notification| to be dispatched later. Per its name this is
// intended to run while the trace lock is held; the callback is copied on
// the first bit so it can be invoked after the lock is released (see
// SendNotificationIfAny). No-op when no notification callback is set.
void TraceLog::NotificationHelper::AddNotificationWhileLocked(
    int notification) {
  if (trace_log_->notification_callback_.is_null())
    return;
  if (notification_ == 0)
    callback_copy_ = trace_log_->notification_callback_;
  notification_ |= notification;
}
    760 
// Runs the copied callback with the accumulated bits, if any were recorded.
void TraceLog::NotificationHelper::SendNotificationIfAny() {
  if (notification_)
    callback_copy_.Run(notification_);
}
    765 
// static
// Returns the process-wide TraceLog. Leaky: never destroyed at process
// exit (tests use DeleteTraceLogForTesting to reset it).
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}
    770 
    771 // static
    772 // Note, if you add more options here you also need to update:
    773 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString
    774 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
    775   std::vector<std::string> split;
    776   base::SplitString(options, ',', &split);
    777   int ret = 0;
    778   for (std::vector<std::string>::iterator iter = split.begin();
    779        iter != split.end();
    780        ++iter) {
    781     if (*iter == kRecordUntilFull) {
    782       ret |= RECORD_UNTIL_FULL;
    783     } else if (*iter == kRecordContinuously) {
    784       ret |= RECORD_CONTINUOUSLY;
    785     } else if (*iter == kEnableSampling) {
    786       ret |= ENABLE_SAMPLING;
    787     } else {
    788       NOTREACHED();  // Unknown option provided.
    789     }
    790   }
    791   if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
    792     ret |= RECORD_UNTIL_FULL;  // Default when no options are specified.
    793 
    794   return static_cast<Options>(ret);
    795 }
    796 
// Singleton constructor: annotates the category-enabled flags as benignly
// racy, records the current process id, honors --trace-to-console when a
// command line exists, and allocates the initial trace buffer.
TraceLog::TraceLog()
    : enable_count_(0),
      num_traces_recorded_(0),
      event_callback_(NULL),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      watch_category_(NULL),
      trace_options_(RECORD_UNTIL_FULL),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                           "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string category_string =
        CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kTraceToConsole);

    // An empty switch value means "trace everything".
    if (category_string.empty())
      category_string = "*";

    SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE);
  }
#endif

  logged_events_.reset(GetTraceBuffer());
}
    840 
// The TraceLog singleton is intentionally leaky (see DeleteTraceLogForTesting);
// nothing is torn down here.
TraceLog::~TraceLog() {
}
    843 
    844 const unsigned char* TraceLog::GetCategoryGroupEnabled(
    845     const char* category_group) {
    846   TraceLog* tracelog = GetInstance();
    847   if (!tracelog) {
    848     DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    849     return &g_category_group_enabled[g_category_already_shutdown];
    850   }
    851   return tracelog->GetCategoryGroupEnabledInternal(category_group);
    852 }
    853 
    854 const char* TraceLog::GetCategoryGroupName(
    855     const unsigned char* category_group_enabled) {
    856   // Calculate the index of the category group by finding
    857   // category_group_enabled in g_category_group_enabled array.
    858   uintptr_t category_begin =
    859       reinterpret_cast<uintptr_t>(g_category_group_enabled);
    860   uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
    861   DCHECK(category_ptr >= category_begin &&
    862          category_ptr < reinterpret_cast<uintptr_t>(
    863              g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
    864       "out of bounds category pointer";
    865   uintptr_t category_index =
    866       (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
    867   return g_category_groups[category_index];
    868 }
    869 
    870 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
    871   bool is_enabled = enable_count_ && category_filter_.IsCategoryGroupEnabled(
    872       g_category_groups[category_index]);
    873   SetCategoryGroupEnabled(category_index, is_enabled);
    874 }
    875 
    876 void TraceLog::UpdateCategoryGroupEnabledFlags() {
    877   for (int i = 0; i < g_category_index; i++)
    878     UpdateCategoryGroupEnabledFlag(i);
    879 }
    880 
    881 void TraceLog::SetCategoryGroupEnabled(int category_index, bool is_enabled) {
    882   g_category_group_enabled[category_index] =
    883       is_enabled ? CATEGORY_GROUP_ENABLED : 0;
    884 
    885 #if defined(OS_ANDROID)
    886   ApplyATraceEnabledFlag(&g_category_group_enabled[category_index]);
    887 #endif
    888 }
    889 
    890 bool TraceLog::IsCategoryGroupEnabled(
    891     const unsigned char* category_group_enabled) {
    892   // On Android, ATrace and normal trace can be enabled independently.
    893   // This function checks if the normal trace is enabled.
    894   return *category_group_enabled & CATEGORY_GROUP_ENABLED;
    895 }
    896 
// Returns the enabled-flag slot for |category_group|, registering the group
// in the global tables on first use. Registration happens under |lock_|,
// but readers of the tables are lock-free, so the name is published before
// the enabled flag is computed.
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  AutoLock lock(lock_);

  unsigned char* category_group_enabled = NULL;
  // Search for pre-existing category group.
  for (int i = 0; i < g_category_index; i++) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      category_group_enabled = &g_category_group_enabled[i];
      break;
    }
  }

  if (!category_group_enabled) {
    // Create a new category group
    DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
        "must increase MAX_CATEGORY_GROUPS";
    if (g_category_index < MAX_CATEGORY_GROUPS) {
      int new_index = g_category_index++;
      // Don't hold on to the category_group pointer, so that we can create
      // category groups with strings not known at compile time (this is
      // required by SetWatchEvent).
      const char* new_group = strdup(category_group);
      // The copy is intentionally never freed; suppress leak reports.
      ANNOTATE_LEAKING_OBJECT_PTR(new_group);
      g_category_groups[new_index] = new_group;
      DCHECK(!g_category_group_enabled[new_index]);
      // Note that if both included and excluded patterns in the
      // CategoryFilter are empty, we exclude nothing,
      // thereby enabling this category group.
      UpdateCategoryGroupEnabledFlag(new_index);
      category_group_enabled = &g_category_group_enabled[new_index];
    } else {
      // Table overflow: hand out the shared "categories exhausted" slot.
      category_group_enabled =
          &g_category_group_enabled[g_category_categories_exhausted];
    }
  }
  return category_group_enabled;
}
    937 
    938 void TraceLog::GetKnownCategoryGroups(
    939     std::vector<std::string>* category_groups) {
    940   AutoLock lock(lock_);
    941   for (int i = g_num_builtin_categories; i < g_category_index; i++)
    942     category_groups->push_back(g_category_groups[i]);
    943 }
    944 
    945 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
    946                           Options options) {
    947   std::vector<EnabledStateObserver*> observer_list;
    948   {
    949     AutoLock lock(lock_);
    950 
    951     if (enable_count_++ > 0) {
    952       if (options != trace_options_) {
    953         DLOG(ERROR) << "Attemting to re-enable tracing with a different "
    954                     << "set of options.";
    955       }
    956 
    957       category_filter_.Merge(category_filter);
    958       UpdateCategoryGroupEnabledFlags();
    959       return;
    960     }
    961 
    962     if (options != trace_options_) {
    963       trace_options_ = options;
    964       logged_events_.reset(GetTraceBuffer());
    965     }
    966 
    967     if (dispatching_to_observer_list_) {
    968       DLOG(ERROR) <<
    969           "Cannot manipulate TraceLog::Enabled state from an observer.";
    970       return;
    971     }
    972 
    973     num_traces_recorded_++;
    974 
    975     category_filter_ = CategoryFilter(category_filter);
    976     UpdateCategoryGroupEnabledFlags();
    977 
    978     if (options & ENABLE_SAMPLING) {
    979       sampling_thread_.reset(new TraceSamplingThread);
    980       sampling_thread_->RegisterSampleBucket(
    981           &g_trace_state[0],
    982           "bucket0",
    983           Bind(&TraceSamplingThread::DefaultSampleCallback));
    984       sampling_thread_->RegisterSampleBucket(
    985           &g_trace_state[1],
    986           "bucket1",
    987           Bind(&TraceSamplingThread::DefaultSampleCallback));
    988       sampling_thread_->RegisterSampleBucket(
    989           &g_trace_state[2],
    990           "bucket2",
    991           Bind(&TraceSamplingThread::DefaultSampleCallback));
    992       if (!PlatformThread::Create(
    993             0, sampling_thread_.get(), &sampling_thread_handle_)) {
    994         DCHECK(false) << "failed to create thread";
    995       }
    996     }
    997 
    998     dispatching_to_observer_list_ = true;
    999     observer_list = enabled_state_observer_list_;
   1000   }
   1001   // Notify observers outside the lock in case they trigger trace events.
   1002   for (size_t i = 0; i < observer_list.size(); ++i)
   1003     observer_list[i]->OnTraceLogEnabled();
   1004 
   1005   {
   1006     AutoLock lock(lock_);
   1007     dispatching_to_observer_list_ = false;
   1008   }
   1009 }
   1010 
// Returns the filter currently in force. Only meaningful while tracing is
// enabled. Note the returned reference points at a member that is mutated
// under |lock_|; callers are expected to use it while tracing stays enabled.
const CategoryFilter& TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  DCHECK(enable_count_ > 0);
  return category_filter_;
}
   1016 
// Undoes one SetEnabled() call; tracing actually stops only when the nesting
// count reaches zero. Stops the sampling thread (if any), appends metadata
// events to the buffer, and notifies observers outside the lock.
void TraceLog::SetDisabled() {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);
    DCHECK(enable_count_ > 0);
    // Still nested inside an outer enable: nothing to tear down yet.
    if (--enable_count_ != 0)
      return;

    if (dispatching_to_observer_list_) {
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    if (sampling_thread_.get()) {
      // Stop the sampling thread.
      sampling_thread_->Stop();
      // Drop the lock while joining so the sampling thread can finish any
      // work that itself needs |lock_|; re-acquire before touching members.
      lock_.Release();
      PlatformThread::Join(sampling_thread_handle_);
      lock_.Acquire();
      sampling_thread_handle_ = PlatformThreadHandle();
      sampling_thread_.reset();
    }

    category_filter_.Clear();
    watch_category_ = NULL;
    watch_event_name_ = "";
    UpdateCategoryGroupEnabledFlags();
    // Record process/thread metadata into the buffer before it is flushed.
    AddMetadataEvents();

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }

  // Dispatch to observers outside the lock in case the observer triggers a
  // trace event.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogDisabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
   1061 
   1062 int TraceLog::GetNumTracesRecorded() {
   1063   AutoLock lock(lock_);
   1064   if (enable_count_ == 0)
   1065     return -1;
   1066   return num_traces_recorded_;
   1067 }
   1068 
// Registers an observer for enable/disable notifications. Not synchronized;
// presumably callers register before tracing starts — TODO confirm.
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}
   1072 
   1073 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
   1074   std::vector<EnabledStateObserver*>::iterator it =
   1075       std::find(enabled_state_observer_list_.begin(),
   1076                 enabled_state_observer_list_.end(),
   1077                 listener);
   1078   if (it != enabled_state_observer_list_.end())
   1079     enabled_state_observer_list_.erase(it);
   1080 }
   1081 
   1082 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
   1083   std::vector<EnabledStateObserver*>::const_iterator it =
   1084       std::find(enabled_state_observer_list_.begin(),
   1085                 enabled_state_observer_list_.end(),
   1086                 listener);
   1087   return it != enabled_state_observer_list_.end();
   1088 }
   1089 
   1090 float TraceLog::GetBufferPercentFull() const {
   1091   return (float)((double)logged_events_->Size()/(double)kTraceEventBufferSize);
   1092 }
   1093 
// Installs the callback used to signal buffer-full and watch-event
// notifications; replaced wholesale under the lock.
void TraceLog::SetNotificationCallback(
    const TraceLog::NotificationCallback& cb) {
  AutoLock lock(lock_);
  notification_callback_ = cb;
}
   1099 
   1100 TraceBuffer* TraceLog::GetTraceBuffer() {
   1101   if (trace_options_ & RECORD_CONTINUOUSLY)
   1102     return new TraceBufferRingBuffer();
   1103   else if (trace_options_ & ECHO_TO_CONSOLE)
   1104     return new TraceBufferDiscardsEvents();
   1105   return new TraceBufferVector();
   1106 }
   1107 
   1108 void TraceLog::SetEventCallback(EventCallback cb) {
   1109   AutoLock lock(lock_);
   1110   event_callback_ = cb;
   1111 };
   1112 
   1113 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
   1114   // Ignore memory allocations from here down.
   1115   INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
   1116                         TRACE_MEMORY_IGNORE);
   1117   scoped_ptr<TraceBuffer> previous_logged_events;
   1118   {
   1119     AutoLock lock(lock_);
   1120     previous_logged_events.swap(logged_events_);
   1121     logged_events_.reset(GetTraceBuffer());
   1122   }  // release lock
   1123 
   1124   while (previous_logged_events->HasMoreEvents()) {
   1125     scoped_refptr<RefCountedString> json_events_str_ptr =
   1126         new RefCountedString();
   1127 
   1128     for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
   1129       if (i > 0)
   1130         *(&(json_events_str_ptr->data())) += ",";
   1131 
   1132       previous_logged_events->NextEvent().AppendAsJSON(
   1133           &(json_events_str_ptr->data()));
   1134 
   1135       if (!previous_logged_events->HasMoreEvents())
   1136         break;
   1137     }
   1138 
   1139     cb.Run(json_events_str_ptr);
   1140   }
   1141 }
   1142 
   1143 void TraceLog::AddTraceEvent(
   1144     char phase,
   1145     const unsigned char* category_group_enabled,
   1146     const char* name,
   1147     unsigned long long id,
   1148     int num_args,
   1149     const char** arg_names,
   1150     const unsigned char* arg_types,
   1151     const unsigned long long* arg_values,
   1152     scoped_ptr<ConvertableToTraceFormat> convertable_values[],
   1153     unsigned char flags) {
   1154   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   1155   base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
   1156   AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled, name, id,
   1157                                         thread_id, now, num_args, arg_names,
   1158                                         arg_types, arg_values,
   1159                                         convertable_values, flags);
   1160 }
   1161 
// Records one trace event: mangles the id if requested, mirrors to ATrace on
// Android, tracks thread-name changes, appends the event to the buffer, and
// optionally pretty-prints it to the console (ECHO_TO_CONSOLE). Buffer-full
// and watch-event notifications, plus the event callback, fire after the
// lock is released.
void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    scoped_ptr<ConvertableToTraceFormat> convertable_values[],
    unsigned char flags) {
  DCHECK(name);

  // XOR with the per-process hash so ids from different processes don't
  // collide in a combined trace.
  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

#if defined(OS_ANDROID)
  // ATrace receives events even when the normal trace is disabled.
  SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
               num_args, arg_names, arg_types, arg_values, convertable_values,
               flags);
#endif

  if (!IsCategoryGroupEnabled(category_group_enabled))
    return;

  TimeTicks now = timestamp - time_offset_;
  EventCallback event_callback_copy;

  NotificationHelper notifier(this);

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock lock(lock_);
      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name. Keep all names this id has used, comma-separated.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  TraceEvent trace_event(thread_id,
      now, phase, category_group_enabled, name, id,
      num_args, arg_names, arg_types, arg_values,
      convertable_values, flags);

  // do/while(0) bounds the locked region; `break` drops the event when the
  // buffer is full while still capturing the event callback.
  do {
    AutoLock lock(lock_);

    event_callback_copy = event_callback_;
    if (logged_events_->IsFull())
      break;

    logged_events_->AddEvent(trace_event);

    if (trace_options_ & ECHO_TO_CONSOLE) {
      // Console echo: indent by nesting depth, color per thread, and report
      // the duration when an END closes the matching BEGIN.
      TimeDelta duration;
      if (phase == TRACE_EVENT_PHASE_END) {
        duration = timestamp - thread_event_start_times_[thread_id].top();
        thread_event_start_times_[thread_id].pop();
      }

      std::string thread_name = thread_names_[thread_id];
      if (thread_colors_.find(thread_name) == thread_colors_.end())
        thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

      std::ostringstream log;
      log << base::StringPrintf("%s: \x1b[0;3%dm",
                                thread_name.c_str(),
                                thread_colors_[thread_name]);

      size_t depth = 0;
      if (thread_event_start_times_.find(thread_id) !=
          thread_event_start_times_.end())
        depth = thread_event_start_times_[thread_id].size();

      for (size_t i = 0; i < depth; ++i)
        log << "| ";

      trace_event.AppendPrettyPrinted(&log);
      if (phase == TRACE_EVENT_PHASE_END)
        log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

      LOG(ERROR) << log.str() << "\x1b[0;m";

      if (phase == TRACE_EVENT_PHASE_BEGIN)
        thread_event_start_times_[thread_id].push(timestamp);
    }

    if (logged_events_->IsFull())
      notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);

    if (watch_category_ == category_group_enabled && watch_event_name_ == name)
      notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
  } while (0); // release lock

  // Notifications and the event callback run unlocked.
  notifier.SendNotificationIfAny();
  if (event_callback_copy != NULL) {
    event_callback_copy(phase, category_group_enabled, name, id,
        num_args, arg_names, arg_types, arg_values,
        flags);
  }
}
   1291 
// Bridges a legacy ETW-style event (C-string extra data) into the trace:
// forwards to the Windows ETW provider, then records a normal trace event.
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
   1302 
   1303 void TraceLog::AddTraceEventEtw(char phase,
   1304                                 const char* name,
   1305                                 const void* id,
   1306                                 const std::string& extra)
   1307 {
   1308 #if defined(OS_WIN)
   1309   TraceEventETWProvider::Trace(name, phase, id, extra);
   1310 #endif
   1311   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
   1312                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
   1313 }
   1314 
// Arms a watch for |event_name| in |category_name|: future matching events
// trigger EVENT_WATCH_NOTIFICATION, and events already in the buffer are
// notified immediately (one notification per occurrence).
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name) {
  // May register a new category group; must be called before taking lock_.
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  size_t notify_count = 0;
  {
    AutoLock lock(lock_);
    watch_category_ = category;
    watch_event_name_ = event_name;

    // First, search existing events for watch event because we want to catch
    // it even if it has already occurred.
    notify_count = logged_events_->CountEnabledByName(category, event_name);
  }  // release lock

  // Send notification for each event found.
  for (size_t i = 0; i < notify_count; ++i) {
    NotificationHelper notifier(this);
    // AddNotificationWhileLocked requires lock_ held; the notification
    // itself is sent unlocked.
    lock_.Acquire();
    notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
    lock_.Release();
    notifier.SendNotificationIfAny();
  }
}
   1339 
   1340 void TraceLog::CancelWatchEvent() {
   1341   AutoLock lock(lock_);
   1342   watch_category_ = NULL;
   1343   watch_event_name_ = "";
   1344 }
   1345 
namespace {

// Appends a single PHASE_METADATA event carrying one named argument
// (e.g. "process_name"/"name") for the given thread to |logged_events|.
// |value| may be any type SetTraceValue() knows how to encode.
template <typename T>
void AddMetadataEventToBuffer(
    TraceBuffer* logged_events,
    int thread_id,
    const char* metadata_name, const char* arg_name,
    const T& value) {
  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  // Encode |value| into the trace's (type, 64-bit payload) representation.
  trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  logged_events->AddEvent(TraceEvent(
      thread_id,
      TimeTicks(), TRACE_EVENT_PHASE_METADATA,
      &g_category_group_enabled[g_category_metadata],
      metadata_name, trace_event_internal::kNoEventId,
      num_args, &arg_name, &arg_type, &arg_value, NULL,
      TRACE_EVENT_FLAG_NONE));
}

}  // namespace
   1368 
   1369 void TraceLog::AddMetadataEvents() {
   1370   lock_.AssertAcquired();
   1371 
   1372   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   1373   if (process_sort_index_ != 0) {
   1374     AddMetadataEventToBuffer(logged_events_.get(),
   1375                              current_thread_id,
   1376                              "process_sort_index", "sort_index",
   1377                              process_sort_index_);
   1378   }
   1379 
   1380   if (process_name_.size()) {
   1381     AddMetadataEventToBuffer(logged_events_.get(),
   1382                              current_thread_id,
   1383                              "process_name", "name",
   1384                              process_name_);
   1385   }
   1386 
   1387   if (process_labels_.size() > 0) {
   1388     std::vector<std::string> labels;
   1389     for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
   1390         it != process_labels_.end();
   1391         it++) {
   1392       labels.push_back(it->second);
   1393     }
   1394     AddMetadataEventToBuffer(logged_events_.get(),
   1395                              current_thread_id,
   1396                              "process_labels", "labels",
   1397                              JoinString(labels, ','));
   1398   }
   1399 
   1400   // Thread sort indices.
   1401   for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
   1402       it != thread_sort_indices_.end();
   1403       it++) {
   1404     if (it->second == 0)
   1405       continue;
   1406     AddMetadataEventToBuffer(logged_events_.get(),
   1407                              it->first,
   1408                              "thread_sort_index", "sort_index",
   1409                              it->second);
   1410   }
   1411 
   1412   // Thread names.
   1413   for(hash_map<int, std::string>::iterator it = thread_names_.begin();
   1414       it != thread_names_.end();
   1415       it++) {
   1416     if (it->second.empty())
   1417       continue;
   1418     AddMetadataEventToBuffer(logged_events_.get(),
   1419                              it->first,
   1420                              "thread_name", "name",
   1421                              it->second);
   1422   }
   1423 }
   1424 
// Test hook: hands |waitable_event| to the sampling thread so tests can
// synchronize with sampling. NOTE(review): dereferences sampling_thread_
// unconditionally — presumably only called while sampling is enabled;
// confirm with callers.
void TraceLog::InstallWaitableEventForSamplingTesting(
    WaitableEvent* waitable_event) {
  sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
}
   1429 
// Test hook: destroys the leaky TraceLog singleton via the friend helper
// declared at the top of this file.
void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}
   1433 
   1434 void TraceLog::SetProcessID(int process_id) {
   1435   process_id_ = process_id;
   1436   // Create a FNV hash from the process ID for XORing.
   1437   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
   1438   unsigned long long offset_basis = 14695981039346656037ull;
   1439   unsigned long long fnv_prime = 1099511628211ull;
   1440   unsigned long long pid = static_cast<unsigned long long>(process_id_);
   1441   process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
   1442 }
   1443 
// Sets the sort index used by trace viewers to order this process; emitted
// as metadata by AddMetadataEvents() when non-zero.
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}
   1448 
// Sets the human-readable process name; emitted as metadata by
// AddMetadataEvents() when non-empty.
void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}
   1453 
   1454 void TraceLog::UpdateProcessLabel(
   1455     int label_id, const std::string& current_label) {
   1456   if(!current_label.length())
   1457     return RemoveProcessLabel(label_id);
   1458 
   1459   AutoLock lock(lock_);
   1460   process_labels_[label_id] = current_label;
   1461 }
   1462 
   1463 void TraceLog::RemoveProcessLabel(int label_id) {
   1464   AutoLock lock(lock_);
   1465   base::hash_map<int, std::string>::iterator it = process_labels_.find(
   1466         label_id);
   1467   if (it == process_labels_.end())
   1468     return;
   1469 
   1470   process_labels_.erase(it);
   1471 }
   1472 
// Sets the viewer sort index for |thread_id|; emitted as metadata by
// AddMetadataEvents() when non-zero.
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}
   1477 
// Sets the offset subtracted from every event timestamp. Not synchronized;
// presumably set before tracing starts — TODO confirm.
void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}
   1481 
// Test hook: number of registered enabled-state observers.
size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}
   1485 
   1486 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
   1487     const std::string& str) {
   1488   return  str.empty() ||
   1489           str.at(0) == ' ' ||
   1490           str.at(str.length() - 1) == ' ';
   1491 }
   1492 
// Returns true if any comma-separated token of |category_group| matches the
// |category| pattern (which may contain '*'/'?' wildcards, per MatchPattern).
bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(category_group,
                          category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}
   1510 
   1511 CategoryFilter::CategoryFilter(const std::string& filter_string) {
   1512   if (!filter_string.empty())
   1513     Initialize(filter_string);
   1514   else
   1515     Initialize(CategoryFilter::kDefaultCategoryFilterString);
   1516 }
   1517 
// Copy constructor: duplicates all three pattern lists.
CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_) {
}
   1523 
// Nothing to release; pattern lists clean themselves up.
CategoryFilter::~CategoryFilter() {
}
   1526 
// Copy assignment: replaces all three pattern lists; self-assignment safe.
CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  return *this;
}
   1536 
   1537 void CategoryFilter::Initialize(const std::string& filter_string) {
   1538   // Tokenize list of categories, delimited by ','.
   1539   StringTokenizer tokens(filter_string, ",");
   1540   // Add each token to the appropriate list (included_,excluded_).
   1541   while (tokens.GetNext()) {
   1542     std::string category = tokens.token();
   1543     // Ignore empty categories.
   1544     if (category.empty())
   1545       continue;
   1546     // Excluded categories start with '-'.
   1547     if (category.at(0) == '-') {
   1548       // Remove '-' from category string.
   1549       category = category.substr(1);
   1550       excluded_.push_back(category);
   1551     } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
   1552                                 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
   1553       disabled_.push_back(category);
   1554     } else {
   1555       included_.push_back(category);
   1556     }
   1557   }
   1558 }
   1559 
   1560 void CategoryFilter::WriteString(const StringList& values,
   1561                                  std::string* out,
   1562                                  bool included) const {
   1563   bool prepend_comma = !out->empty();
   1564   int token_cnt = 0;
   1565   for (StringList::const_iterator ci = values.begin();
   1566        ci != values.end(); ++ci) {
   1567     if (token_cnt > 0 || prepend_comma)
   1568       StringAppendF(out, ",");
   1569     StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
   1570     ++token_cnt;
   1571   }
   1572 }
   1573 
   1574 std::string CategoryFilter::ToString() const {
   1575   std::string filter_string;
   1576   WriteString(included_, &filter_string, true);
   1577   WriteString(disabled_, &filter_string, true);
   1578   WriteString(excluded_, &filter_string, false);
   1579   return filter_string;
   1580 }
   1581 
// Decides whether a category group passes this filter. Check order matters:
// explicit disabled- matches win, then the disabled-* wildcard suppresses,
// then included patterns, then exclusions; with no included patterns the
// group is enabled by default.
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as  part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded, and there are no included patterns
  // we consider this pattern enabled.
  return included_.empty();
}
   1611 
   1612 bool CategoryFilter::HasIncludedPatterns() const {
   1613   return !included_.empty();
   1614 }
   1615 
   1616 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
   1617   // Keep included patterns only if both filters have an included entry.
   1618   // Otherwise, one of the filter was specifying "*" and we want to honour the
   1619   // broadest filter.
   1620   if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
   1621     included_.insert(included_.end(),
   1622                      nested_filter.included_.begin(),
   1623                      nested_filter.included_.end());
   1624   } else {
   1625     included_.clear();
   1626   }
   1627 
   1628   disabled_.insert(disabled_.end(),
   1629                    nested_filter.disabled_.begin(),
   1630                    nested_filter.disabled_.end());
   1631   excluded_.insert(excluded_.end(),
   1632                    nested_filter.excluded_.begin(),
   1633                    nested_filter.excluded_.end());
   1634 }
   1635 
   1636 void CategoryFilter::Clear() {
   1637   included_.clear();
   1638   disabled_.clear();
   1639   excluded_.clear();
   1640 }
   1641 
   1642 }  // namespace debug
   1643 }  // namespace base
   1644 
   1645 namespace trace_event_internal {
   1646 
   1647 ScopedTrace::ScopedTrace(
   1648     TRACE_EVENT_API_ATOMIC_WORD* event_uid, const char* name) {
   1649   category_group_enabled_ =
   1650     reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD(
   1651         *event_uid));
   1652   if (!category_group_enabled_) {
   1653     category_group_enabled_ = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("gpu");
   1654     TRACE_EVENT_API_ATOMIC_STORE(
   1655         *event_uid,
   1656         reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled_));
   1657   }
   1658   if (*category_group_enabled_) {
   1659     name_ = name;
   1660     TRACE_EVENT_API_ADD_TRACE_EVENT(
   1661         TRACE_EVENT_PHASE_BEGIN,    // phase
   1662         category_group_enabled_,          // category enabled
   1663         name,                       // name
   1664         0,                          // id
   1665         0,                          // num_args
   1666         NULL,                       // arg_names
   1667         NULL,                       // arg_types
   1668         NULL,                       // arg_values
   1669         NULL,                       // convertable_values
   1670         TRACE_EVENT_FLAG_NONE);     // flags
   1671   } else {
   1672     category_group_enabled_ = NULL;
   1673   }
   1674 }
   1675 
   1676 ScopedTrace::~ScopedTrace() {
   1677   if (category_group_enabled_ && *category_group_enabled_) {
   1678     TRACE_EVENT_API_ADD_TRACE_EVENT(
   1679         TRACE_EVENT_PHASE_END,   // phase
   1680         category_group_enabled_,       // category enabled
   1681         name_,                   // name
   1682         0,                       // id
   1683         0,                       // num_args
   1684         NULL,                    // arg_names
   1685         NULL,                    // arg_types
   1686         NULL,                    // arg_values
   1687         NULL,                    // convertable values
   1688         TRACE_EVENT_FLAG_NONE);  // flags
   1689   }
   1690 }
   1691 
   1692 }  // namespace trace_event_internal
   1693