      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/debug/trace_event_impl.h"
      6 
      7 #include <algorithm>
      8 
      9 #include "base/base_switches.h"
     10 #include "base/bind.h"
     11 #include "base/command_line.h"
     12 #include "base/debug/leak_annotations.h"
     13 #include "base/debug/trace_event.h"
     14 #include "base/format_macros.h"
     15 #include "base/json/string_escape.h"
     16 #include "base/lazy_instance.h"
     17 #include "base/memory/singleton.h"
     18 #include "base/message_loop/message_loop.h"
     19 #include "base/process/process_metrics.h"
     20 #include "base/stl_util.h"
     21 #include "base/strings/string_number_conversions.h"
     22 #include "base/strings/string_split.h"
     23 #include "base/strings/string_tokenizer.h"
     24 #include "base/strings/string_util.h"
     25 #include "base/strings/stringprintf.h"
     26 #include "base/strings/utf_string_conversions.h"
     27 #include "base/synchronization/cancellation_flag.h"
     28 #include "base/synchronization/waitable_event.h"
     29 #include "base/sys_info.h"
     30 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
     31 #include "base/threading/platform_thread.h"
     32 #include "base/threading/thread_id_name_manager.h"
     33 #include "base/time/time.h"
     34 
     35 #if defined(OS_WIN)
     36 #include "base/debug/trace_event_win.h"
     37 #endif
     38 
     39 class DeleteTraceLogForTesting {
     40  public:
     41   static void Delete() {
     42     Singleton<base::debug::TraceLog,
     43               LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
     44   }
     45 };
     46 
     47 // The thread buckets for the sampling profiler.
     48 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
     49 
     50 namespace base {
     51 namespace debug {
     52 
     53 namespace {
     54 
     55 // TraceEvent overhead above this threshold (in microseconds) will be
     56 // reported in the trace.
     57 const int kOverheadReportThresholdInMicroseconds = 50;
     58 
     59 // Controls the number of trace events we will buffer in-memory
     60 // before throwing them away.
     61 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
     62 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
     63 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
     64 const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
     65 // Can store results for 30 seconds with 1 ms sampling interval.
     66 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
     67 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
     68 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
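        // A hedged arithmetic sketch of the capacities these constants imply:
        // chunks * chunk size cancels the chunk size, so the vector buffer holds
        // roughly 256000 events, the ring buffer a quarter of that (~64000), and
        // the monitor buffer ~30000 events (one per millisecond for 30 seconds),
        // regardless of the actual value of kTraceBufferChunkSize.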
     69 
     70 const int kThreadFlushTimeoutMs = 3000;
     71 
     72 // These categories will cause deadlock when ECHO_TO_CONSOLE is enabled. See crbug.com/325575.
     73 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
     74 
     75 #define MAX_CATEGORY_GROUPS 100
     76 
     77 // Parallel arrays g_category_groups and g_category_group_enabled are separate
     78 // so that a pointer to a member of g_category_group_enabled can be easily
     79 // converted to an index into g_category_groups. This allows macros to deal
     80 // only with char enabled pointers from g_category_group_enabled, and we can
     81 // convert internally to determine the category name from the char enabled
     82 // pointer.
     83 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
     84   "tracing already shutdown",
     85   "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
     86   "__metadata",
     87   // For reporting trace_event overhead. For thread local event buffers only.
     88   "trace_event_overhead"};
     89 
     90 // The enabled flag is char instead of bool so that the API can be used from C.
     91 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
     92 const int g_category_already_shutdown = 0;
     93 const int g_category_categories_exhausted = 1;
     94 const int g_category_metadata = 2;
     95 const int g_category_trace_event_overhead = 3;
     96 const int g_num_builtin_categories = 4;
     97 int g_category_index = g_num_builtin_categories; // Skip default categories.
     98 
     99 // The name of the current thread. This is used to decide if the current
    100 // thread name has changed. We combine all the seen thread names into the
    101 // output name for the thread.
    102 LazyInstance<ThreadLocalPointer<const char> >::Leaky
    103     g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
    104 
    105 TimeTicks ThreadNow() {
    106   return TimeTicks::IsThreadNowSupported() ?
    107       TimeTicks::ThreadNow() : TimeTicks();
    108 }
    109 
    110 class TraceBufferRingBuffer : public TraceBuffer {
    111  public:
    112   TraceBufferRingBuffer(size_t max_chunks)
    113       : max_chunks_(max_chunks),
    114         recyclable_chunks_queue_(new size_t[queue_capacity()]),
    115         queue_head_(0),
    116         queue_tail_(max_chunks),
    117         current_iteration_index_(0),
    118         current_chunk_seq_(1) {
    119     chunks_.reserve(max_chunks);
    120     for (size_t i = 0; i < max_chunks; ++i)
    121       recyclable_chunks_queue_[i] = i;
    122   }
    123 
    124   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    125     // Because the number of threads is much less than the number of chunks,
    126     // the queue should never be empty.
    127     DCHECK(!QueueIsEmpty());
    128 
    129     *index = recyclable_chunks_queue_[queue_head_];
    130     queue_head_ = NextQueueIndex(queue_head_);
    131     current_iteration_index_ = queue_head_;
    132 
    133     if (*index >= chunks_.size())
    134       chunks_.resize(*index + 1);
    135 
    136     TraceBufferChunk* chunk = chunks_[*index];
    137     chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    138     if (chunk)
    139       chunk->Reset(current_chunk_seq_++);
    140     else
    141       chunk = new TraceBufferChunk(current_chunk_seq_++);
    142 
    143     return scoped_ptr<TraceBufferChunk>(chunk);
    144   }
    145 
    146   virtual void ReturnChunk(size_t index,
    147                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    148     // When this method is called, the queue should not be full because it
    149     // can contain all chunks including the one to be returned.
    150     DCHECK(!QueueIsFull());
    151     DCHECK(chunk);
    152     DCHECK_LT(index, chunks_.size());
    153     DCHECK(!chunks_[index]);
    154     chunks_[index] = chunk.release();
    155     recyclable_chunks_queue_[queue_tail_] = index;
    156     queue_tail_ = NextQueueIndex(queue_tail_);
    157   }
    158 
    159   virtual bool IsFull() const OVERRIDE {
    160     return false;
    161   }
    162 
    163   virtual size_t Size() const OVERRIDE {
    164     // This is approximate because not all of the chunks are full.
    165     return chunks_.size() * kTraceBufferChunkSize;
    166   }
    167 
    168   virtual size_t Capacity() const OVERRIDE {
    169     return max_chunks_ * kTraceBufferChunkSize;
    170   }
    171 
    172   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    173     if (handle.chunk_index >= chunks_.size())
    174       return NULL;
    175     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    176     if (!chunk || chunk->seq() != handle.chunk_seq)
    177       return NULL;
    178     return chunk->GetEventAt(handle.event_index);
    179   }
    180 
    181   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    182     if (chunks_.empty())
    183       return NULL;
    184 
    185     while (current_iteration_index_ != queue_tail_) {
    186       size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
    187       current_iteration_index_ = NextQueueIndex(current_iteration_index_);
    188       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
    189         continue;
    190       DCHECK(chunks_[chunk_index]);
    191       return chunks_[chunk_index];
    192     }
    193     return NULL;
    194   }
    195 
    196   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    197     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    198     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
    199         queue_index = NextQueueIndex(queue_index)) {
    200       size_t chunk_index = recyclable_chunks_queue_[queue_index];
    201       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
    202         continue;
    203       TraceBufferChunk* chunk = chunks_[chunk_index];
    204       cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    205     }
    206     return cloned_buffer.PassAs<TraceBuffer>();
    207   }
    208 
    209  private:
    210   class ClonedTraceBuffer : public TraceBuffer {
    211    public:
    212     ClonedTraceBuffer() : current_iteration_index_(0) {}
    213 
    214     // The only implemented method.
    215     virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    216       return current_iteration_index_ < chunks_.size() ?
    217           chunks_[current_iteration_index_++] : NULL;
    218     }
    219 
    220     virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    221       NOTIMPLEMENTED();
    222       return scoped_ptr<TraceBufferChunk>();
    223     }
    224     virtual void ReturnChunk(size_t index,
    225                              scoped_ptr<TraceBufferChunk>) OVERRIDE {
    226       NOTIMPLEMENTED();
    227     }
    228     virtual bool IsFull() const OVERRIDE { return false; }
    229     virtual size_t Size() const OVERRIDE { return 0; }
    230     virtual size_t Capacity() const OVERRIDE { return 0; }
    231     virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    232       return NULL;
    233     }
    234     virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    235       NOTIMPLEMENTED();
    236       return scoped_ptr<TraceBuffer>();
    237     }
    238 
    239     size_t current_iteration_index_;
    240     ScopedVector<TraceBufferChunk> chunks_;
    241   };
    242 
    243   bool QueueIsEmpty() const {
    244     return queue_head_ == queue_tail_;
    245   }
    246 
    247   size_t QueueSize() const {
    248     return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
    249         queue_tail_ + queue_capacity() - queue_head_;
    250   }
    251 
    252   bool QueueIsFull() const {
    253     return QueueSize() == queue_capacity() - 1;
    254   }
    255 
    256   size_t queue_capacity() const {
    257     // One extra space to distinguish the full state from the empty state.
    258     return max_chunks_ + 1;
    259   }
    260 
    261   size_t NextQueueIndex(size_t index) const {
    262     index++;
    263     if (index >= queue_capacity())
    264       index = 0;
    265     return index;
    266   }
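          // Illustrative sketch (not in the original): with max_chunks_ == 2 the
          // queue capacity is 3, the two chunk indices plus one sentinel slot.
          //   queue_head_ == queue_tail_           -> empty (QueueSize() == 0)
          //   QueueSize() == queue_capacity() - 1  -> full (all indices queued)
          // The sentinel slot is what lets QueueIsEmpty() and QueueIsFull()
          // disambiguate the head == tail case.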
    267 
    268   size_t max_chunks_;
    269   ScopedVector<TraceBufferChunk> chunks_;
    270 
    271   scoped_ptr<size_t[]> recyclable_chunks_queue_;
    272   size_t queue_head_;
    273   size_t queue_tail_;
    274 
    275   size_t current_iteration_index_;
    276   uint32 current_chunk_seq_;
    277 
    278   DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
    279 };
    280 
    281 class TraceBufferVector : public TraceBuffer {
    282  public:
    283   TraceBufferVector()
    284       : in_flight_chunk_count_(0),
    285         current_iteration_index_(0) {
    286     chunks_.reserve(kTraceEventVectorBufferChunks);
    287   }
    288 
    289   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    290     // This function may be called when adding normal events or indirectly from
    291     // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    292     // have to add the metadata events and flush thread-local buffers even if
    293     // the buffer is full.
    294     *index = chunks_.size();
    295     chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    296     ++in_flight_chunk_count_;
    297     // + 1 because zero chunk_seq is not allowed.
    298     return scoped_ptr<TraceBufferChunk>(
    299         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
    300   }
    301 
    302   virtual void ReturnChunk(size_t index,
    303                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    304     DCHECK_GT(in_flight_chunk_count_, 0u);
    305     DCHECK_LT(index, chunks_.size());
    306     DCHECK(!chunks_[index]);
    307     --in_flight_chunk_count_;
    308     chunks_[index] = chunk.release();
    309   }
    310 
    311   virtual bool IsFull() const OVERRIDE {
    312     return chunks_.size() >= kTraceEventVectorBufferChunks;
    313   }
    314 
    315   virtual size_t Size() const OVERRIDE {
    316     // This is approximate because not all of the chunks are full.
    317     return chunks_.size() * kTraceBufferChunkSize;
    318   }
    319 
    320   virtual size_t Capacity() const OVERRIDE {
    321     return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
    322   }
    323 
    324   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    325     if (handle.chunk_index >= chunks_.size())
    326       return NULL;
    327     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    328     if (!chunk || chunk->seq() != handle.chunk_seq)
    329       return NULL;
    330     return chunk->GetEventAt(handle.event_index);
    331   }
    332 
    333   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    334     while (current_iteration_index_ < chunks_.size()) {
    335       // Skip in-flight chunks.
    336       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
    337       if (chunk)
    338         return chunk;
    339     }
    340     return NULL;
    341   }
    342 
    343   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    344     NOTIMPLEMENTED();
    345     return scoped_ptr<TraceBuffer>();
    346   }
    347 
    348  private:
    349   size_t in_flight_chunk_count_;
    350   size_t current_iteration_index_;
    351   ScopedVector<TraceBufferChunk> chunks_;
    352 
    353   DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
    354 };
    355 
    356 template <typename T>
    357 void InitializeMetadataEvent(TraceEvent* trace_event,
    358                              int thread_id,
    359                              const char* metadata_name, const char* arg_name,
    360                              const T& value) {
    361   if (!trace_event)
    362     return;
    363 
    364   int num_args = 1;
    365   unsigned char arg_type;
    366   unsigned long long arg_value;
    367   ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
    368   trace_event->Initialize(thread_id,
    369                           TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
    370                           &g_category_group_enabled[g_category_metadata],
    371                           metadata_name, ::trace_event_internal::kNoEventId,
    372                           num_args, &arg_name, &arg_type, &arg_value, NULL,
    373                           TRACE_EVENT_FLAG_NONE);
    374 }
    375 
    376 class AutoThreadLocalBoolean {
    377  public:
    378   explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
    379       : thread_local_boolean_(thread_local_boolean) {
    380     DCHECK(!thread_local_boolean_->Get());
    381     thread_local_boolean_->Set(true);
    382   }
    383   ~AutoThreadLocalBoolean() {
    384     thread_local_boolean_->Set(false);
    385   }
    386 
    387  private:
    388   ThreadLocalBoolean* thread_local_boolean_;
    389   DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
    390 };
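        // Illustrative usage sketch (assumed, not from this file): guarding a
        // non-reentrant section with a per-thread flag:
        //   ThreadLocalBoolean thread_is_tracing;  // hypothetical flag
        //   if (thread_is_tracing.Get())
        //     return;  // already inside; avoid re-entering
        //   AutoThreadLocalBoolean auto_flag(&thread_is_tracing);
        //   ... non-reentrant work; the flag is cleared when auto_flag goes
        //   out of scope.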
    391 
    392 }  // namespace
    393 
    394 void TraceBufferChunk::Reset(uint32 new_seq) {
    395   for (size_t i = 0; i < next_free_; ++i)
    396     chunk_[i].Reset();
    397   next_free_ = 0;
    398   seq_ = new_seq;
    399 }
    400 
    401 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
    402   DCHECK(!IsFull());
    403   *event_index = next_free_++;
    404   return &chunk_[*event_index];
    405 }
    406 
    407 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
    408   scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
    409   cloned_chunk->next_free_ = next_free_;
    410   for (size_t i = 0; i < next_free_; ++i)
    411     cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
    412   return cloned_chunk.Pass();
    413 }
    414 
    415 // A helper class that allows the lock to be acquired in the middle of the scope
    416 // and unlocks at the end of scope if locked.
    417 class TraceLog::OptionalAutoLock {
    418  public:
    419   explicit OptionalAutoLock(Lock& lock)
    420       : lock_(lock),
    421         locked_(false) {
    422   }
    423 
    424   ~OptionalAutoLock() {
    425     if (locked_)
    426       lock_.Release();
    427   }
    428 
    429   void EnsureAcquired() {
    430     if (!locked_) {
    431       lock_.Acquire();
    432       locked_ = true;
    433     }
    434   }
    435 
    436  private:
    437   Lock& lock_;
    438   bool locked_;
    439   DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
    440 };
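        // Illustrative usage sketch (assumed, not from this file): the lock is
        // taken lazily, only on the path that needs it:
        //   void Example(Lock& lock, bool needs_lock) {  // hypothetical
        //     TraceLog::OptionalAutoLock maybe_lock(lock);
        //     if (needs_lock)
        //       maybe_lock.EnsureAcquired();  // acquires at most once
        //     // ~OptionalAutoLock releases only if EnsureAcquired() ran.
        //   }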
    441 
    442 // Use this function instead of a TraceEventHandle constructor to keep the
    443 // overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
    444 void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
    445                 TraceEventHandle* handle) {
    446   DCHECK(chunk_seq);
    447   DCHECK(chunk_index < (1u << 16));
    448   DCHECK(event_index < (1u << 16));
    449   handle->chunk_seq = chunk_seq;
    450   handle->chunk_index = static_cast<uint16>(chunk_index);
    451   handle->event_index = static_cast<uint16>(event_index);
    452 }
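        // Illustrative sketch of the packing the DCHECKs above imply:
        // chunk_index and event_index must each fit in 16 bits, so a handle is
        // effectively (chunk_seq : 32, chunk_index : 16, event_index : 16).
        // For example, MakeHandle(7, 3, 12, &handle) yields chunk_seq == 7,
        // chunk_index == 3 and event_index == 12.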
    453 
    454 ////////////////////////////////////////////////////////////////////////////////
    455 //
    456 // TraceEvent
    457 //
    458 ////////////////////////////////////////////////////////////////////////////////
    459 
    460 namespace {
    461 
    462 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
    463 
    464 // Copies |*member| into |*buffer|, sets |*member| to point to this new
    465 // location, and then advances |*buffer| by the amount written.
    466 void CopyTraceEventParameter(char** buffer,
    467                              const char** member,
    468                              const char* end) {
    469   if (*member) {
    470     size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    471     DCHECK_LE(static_cast<int>(written), end - *buffer);
    472     *member = *buffer;
    473     *buffer += written;
    474   }
    475 }
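        // Illustrative example (not in the original): copying the string "foo"
        // writes 4 bytes ("foo" plus the terminating NUL; strlcpy returns
        // strlen(src) when the string fits, and the +1 above accounts for the
        // NUL), repoints *member at the copy, and advances *buffer by 4.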
    476 
    477 }  // namespace
    478 
    479 TraceEvent::TraceEvent()
    480     : duration_(TimeDelta::FromInternalValue(-1)),
    481       id_(0u),
    482       category_group_enabled_(NULL),
    483       name_(NULL),
    484       thread_id_(0),
    485       phase_(TRACE_EVENT_PHASE_BEGIN),
    486       flags_(0) {
    487   for (int i = 0; i < kTraceMaxNumArgs; ++i)
    488     arg_names_[i] = NULL;
    489   memset(arg_values_, 0, sizeof(arg_values_));
    490 }
    491 
    492 TraceEvent::~TraceEvent() {
    493 }
    494 
    495 void TraceEvent::CopyFrom(const TraceEvent& other) {
    496   timestamp_ = other.timestamp_;
    497   thread_timestamp_ = other.thread_timestamp_;
    498   duration_ = other.duration_;
    499   id_ = other.id_;
    500   category_group_enabled_ = other.category_group_enabled_;
    501   name_ = other.name_;
    502   thread_id_ = other.thread_id_;
    503   phase_ = other.phase_;
    504   flags_ = other.flags_;
    505   parameter_copy_storage_ = other.parameter_copy_storage_;
    506 
    507   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    508     arg_names_[i] = other.arg_names_[i];
    509     arg_types_[i] = other.arg_types_[i];
    510     arg_values_[i] = other.arg_values_[i];
    511     convertable_values_[i] = other.convertable_values_[i];
    512   }
    513 }
    514 
    515 void TraceEvent::Initialize(
    516     int thread_id,
    517     TimeTicks timestamp,
    518     TimeTicks thread_timestamp,
    519     char phase,
    520     const unsigned char* category_group_enabled,
    521     const char* name,
    522     unsigned long long id,
    523     int num_args,
    524     const char** arg_names,
    525     const unsigned char* arg_types,
    526     const unsigned long long* arg_values,
    527     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    528     unsigned char flags) {
    529   timestamp_ = timestamp;
    530   thread_timestamp_ = thread_timestamp;
    531   duration_ = TimeDelta::FromInternalValue(-1);
    532   id_ = id;
    533   category_group_enabled_ = category_group_enabled;
    534   name_ = name;
    535   thread_id_ = thread_id;
    536   phase_ = phase;
    537   flags_ = flags;
    538 
    539   // Clamp num_args since it may have been set by a third_party library.
    540   num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
    541   int i = 0;
    542   for (; i < num_args; ++i) {
    543     arg_names_[i] = arg_names[i];
    544     arg_types_[i] = arg_types[i];
    545 
    546     if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    547       convertable_values_[i] = convertable_values[i];
    548     else
    549       arg_values_[i].as_uint = arg_values[i];
    550   }
    551   for (; i < kTraceMaxNumArgs; ++i) {
    552     arg_names_[i] = NULL;
    553     arg_values_[i].as_uint = 0u;
    554     convertable_values_[i] = NULL;
    555     arg_types_[i] = TRACE_VALUE_TYPE_UINT;
    556   }
    557 
    558   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
    559   size_t alloc_size = 0;
    560   if (copy) {
    561     alloc_size += GetAllocLength(name);
    562     for (i = 0; i < num_args; ++i) {
    563       alloc_size += GetAllocLength(arg_names_[i]);
    564       if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
    565         arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    566     }
    567   }
    568 
    569   bool arg_is_copy[kTraceMaxNumArgs];
    570   for (i = 0; i < num_args; ++i) {
    571     // No copying of convertable types; we retain ownership.
    572     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    573       continue;
    574 
    575     // We only take a copy of arg_vals if they are of type COPY_STRING.
    576     arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    577     if (arg_is_copy[i])
    578       alloc_size += GetAllocLength(arg_values_[i].as_string);
    579   }
    580 
    581   if (alloc_size) {
    582     parameter_copy_storage_ = new RefCountedString;
    583     parameter_copy_storage_->data().resize(alloc_size);
    584     char* ptr = string_as_array(&parameter_copy_storage_->data());
    585     const char* end = ptr + alloc_size;
    586     if (copy) {
    587       CopyTraceEventParameter(&ptr, &name_, end);
    588       for (i = 0; i < num_args; ++i) {
    589         CopyTraceEventParameter(&ptr, &arg_names_[i], end);
    590       }
    591     }
    592     for (i = 0; i < num_args; ++i) {
    593       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    594         continue;
    595       if (arg_is_copy[i])
    596         CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    597     }
    598     DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
    599   }
    600 }
    601 
    602 void TraceEvent::Reset() {
    603   // Only reset fields that won't be initialized in Initialize(), or that may
    604   // hold references to other objects.
    605   duration_ = TimeDelta::FromInternalValue(-1);
    606   parameter_copy_storage_ = NULL;
    607   for (int i = 0; i < kTraceMaxNumArgs; ++i)
    608     convertable_values_[i] = NULL;
    609 }
    610 
    611 void TraceEvent::UpdateDuration(const TimeTicks& now,
    612                                 const TimeTicks& thread_now) {
    613   DCHECK(duration_.ToInternalValue() == -1);
    614   duration_ = now - timestamp_;
    615   thread_duration_ = thread_now - thread_timestamp_;
    616 }
    617 
    618 // static
    619 void TraceEvent::AppendValueAsJSON(unsigned char type,
    620                                    TraceEvent::TraceValue value,
    621                                    std::string* out) {
    622   switch (type) {
    623     case TRACE_VALUE_TYPE_BOOL:
    624       *out += value.as_bool ? "true" : "false";
    625       break;
    626     case TRACE_VALUE_TYPE_UINT:
    627       StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
    628       break;
    629     case TRACE_VALUE_TYPE_INT:
    630       StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
    631       break;
    632     case TRACE_VALUE_TYPE_DOUBLE: {
    633       // FIXME: base/json/json_writer.cc uses the same code; this should
    634       //        be factored into a common method.
    635       std::string real = DoubleToString(value.as_double);
    636       // Ensure that the number has a .0 if there's no decimal or 'e'.  This
    637       // makes sure that when we read the JSON back, it's interpreted as a
    638       // real rather than an int.
    639       if (real.find('.') == std::string::npos &&
    640           real.find('e') == std::string::npos &&
    641           real.find('E') == std::string::npos) {
    642         real.append(".0");
    643       }
    644       // The JSON spec requires that non-integer values in the range (-1,1)
    645       // have a zero before the decimal point - ".52" is not valid, "0.52" is.
    646       if (real[0] == '.') {
    647         real.insert(0, "0");
    648       } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
    649         // "-.1" bad "-0.1" good
    650         real.insert(1, "0");
    651       }
    652 
    653       StringAppendF(out, "%s", real.c_str());
    654       break;
    655     }
    656     case TRACE_VALUE_TYPE_POINTER:
    657       // JSON only supports double and int numbers.
    658       // So as not to lose bits from a 64-bit pointer, output as a hex string.
    659       StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
    660                                      reinterpret_cast<intptr_t>(
    661                                      value.as_pointer)));
    662       break;
    663     case TRACE_VALUE_TYPE_STRING:
    664     case TRACE_VALUE_TYPE_COPY_STRING:
    665       EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
    666       break;
    667     default:
    668       NOTREACHED() << "Don't know how to print this value";
    669       break;
    670   }
    671 }
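        // Illustrative outputs (a hedged sketch, values made up):
        //   TRACE_VALUE_TYPE_BOOL,    true -> true
        //   TRACE_VALUE_TYPE_DOUBLE,  3    -> 3.0    (".0" appended so JSON
        //                                             readers parse a real)
        //   TRACE_VALUE_TYPE_POINTER, p    -> "0x..." (quoted hex string, since
        //                                             JSON numbers could lose
        //                                             64-bit precision)
        //   TRACE_VALUE_TYPE_STRING,  "hi" -> "hi"   (JSON-escaped and quoted)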
    672 
    673 void TraceEvent::AppendAsJSON(std::string* out) const {
    674   int64 time_int64 = timestamp_.ToInternalValue();
    675   int process_id = TraceLog::GetInstance()->process_id();
    676   // Category group checked at category creation time.
    677   DCHECK(!strchr(name_, '"'));
    678   StringAppendF(out,
    679       "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
    680       "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
    681       TraceLog::GetCategoryGroupName(category_group_enabled_),
    682       process_id,
    683       thread_id_,
    684       time_int64,
    685       phase_,
    686       name_);
    687 
    688   // Output argument names and values, stop at first NULL argument name.
    689   for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    690     if (i > 0)
    691       *out += ",";
    692     *out += "\"";
    693     *out += arg_names_[i];
    694     *out += "\":";
    695 
    696     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    697       convertable_values_[i]->AppendAsTraceFormat(out);
    698     else
    699       AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
    700   }
    701   *out += "}";
    702 
    703   if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    704     int64 duration = duration_.ToInternalValue();
    705     if (duration != -1)
    706       StringAppendF(out, ",\"dur\":%" PRId64, duration);
    707     if (!thread_timestamp_.is_null()) {
    708       int64 thread_duration = thread_duration_.ToInternalValue();
    709       if (thread_duration != -1)
    710         StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    711     }
    712   }
    713 
    714   // Output tts if thread_timestamp is valid.
    715   if (!thread_timestamp_.is_null()) {
    716     int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    717     StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
    718   }
    719 
    720   // If id_ is set, print it out as a hex string so we don't lose any
    721   // bits (it might be a 64-bit pointer).
    722   if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    723     StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
    724 
    725   // Instant events also output their scope.
    726   if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    727     char scope = '?';
    728     switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
    729       case TRACE_EVENT_SCOPE_GLOBAL:
    730         scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
    731         break;
    732 
    733       case TRACE_EVENT_SCOPE_PROCESS:
    734         scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
    735         break;
    736 
    737       case TRACE_EVENT_SCOPE_THREAD:
    738         scope = TRACE_EVENT_SCOPE_NAME_THREAD;
    739         break;
    740     }
    741     StringAppendF(out, ",\"s\":\"%c\"", scope);
    742   }
    743 
    744   *out += "}";
    745 }
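        // Illustrative shape of the emitted JSON (hedged; the values are made
        // up):
        //   {"cat":"cc","pid":1234,"tid":5678,"ts":420000,"ph":"X",
        //    "name":"DoWork","args":{},"dur":1000,"tts":400000}
        // "dur"/"tdur" appear only for complete events, "tts" only when the
        // thread timestamp is valid, "id" only with TRACE_EVENT_FLAG_HAS_ID,
        // and "s" only for instant events.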
    746 
    747 void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
    748   *out << name_ << "[";
    749   *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
    750   *out << "]";
    751   if (arg_names_[0]) {
    752     *out << ", {";
    753     for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    754       if (i > 0)
    755         *out << ", ";
    756       *out << arg_names_[i] << ":";
    757       std::string value_as_text;
    758 
    759       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
    760         convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
    761       else
    762         AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
    763 
    764       *out << value_as_text;
    765     }
    766     *out << "}";
    767   }
    768 }
    769 
    770 ////////////////////////////////////////////////////////////////////////////////
    771 //
    772 // TraceResultBuffer
    773 //
    774 ////////////////////////////////////////////////////////////////////////////////
    775 
    776 TraceResultBuffer::OutputCallback
    777     TraceResultBuffer::SimpleOutput::GetCallback() {
    778   return Bind(&SimpleOutput::Append, Unretained(this));
    779 }
    780 
    781 void TraceResultBuffer::SimpleOutput::Append(
    782     const std::string& json_trace_output) {
    783   json_output += json_trace_output;
    784 }
    785 
    786 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
    787 }
    788 
    789 TraceResultBuffer::~TraceResultBuffer() {
    790 }
    791 
    792 void TraceResultBuffer::SetOutputCallback(
    793     const OutputCallback& json_chunk_callback) {
    794   output_callback_ = json_chunk_callback;
    795 }
    796 
    797 void TraceResultBuffer::Start() {
    798   append_comma_ = false;
    799   output_callback_.Run("[");
    800 }
    801 
    802 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
    803   if (append_comma_)
    804     output_callback_.Run(",");
    805   append_comma_ = true;
    806   output_callback_.Run(trace_fragment);
    807 }
    808 
    809 void TraceResultBuffer::Finish() {
    810   output_callback_.Run("]");
    811 }
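        // Illustrative usage sketch (assumed, based on the API above):
        //   TraceResultBuffer buffer;
        //   TraceResultBuffer::SimpleOutput output;
        //   buffer.SetOutputCallback(output.GetCallback());
        //   buffer.Start();                        // emits "["
        //   buffer.AddFragment("{\"ph\":\"B\"}");
        //   buffer.AddFragment("{\"ph\":\"E\"}");  // preceded by a comma
        //   buffer.Finish();                       // emits "]"
        //   // output.json_output == "[{\"ph\":\"B\"},{\"ph\":\"E\"}]"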
    812 
    813 ////////////////////////////////////////////////////////////////////////////////
    814 //
    815 // TraceSamplingThread
    816 //
    817 ////////////////////////////////////////////////////////////////////////////////
    818 class TraceBucketData;
    819 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
    820 
    821 class TraceBucketData {
    822  public:
    823   TraceBucketData(base::subtle::AtomicWord* bucket,
    824                   const char* name,
    825                   TraceSampleCallback callback);
    826   ~TraceBucketData();
    827 
    828   TRACE_EVENT_API_ATOMIC_WORD* bucket;
    829   const char* bucket_name;
    830   TraceSampleCallback callback;
    831 };
    832 
    833 // This object must be created on the IO thread.
    834 class TraceSamplingThread : public PlatformThread::Delegate {
    835  public:
    836   TraceSamplingThread();
    837   virtual ~TraceSamplingThread();
    838 
    839   // Implementation of PlatformThread::Delegate:
    840   virtual void ThreadMain() OVERRIDE;
    841 
    842   static void DefaultSamplingCallback(TraceBucketData* bucket_data);
    843 
    844   void Stop();
    845   void WaitSamplingEventForTesting();
    846 
    847  private:
    848   friend class TraceLog;
    849 
    850   void GetSamples();
    851   // Not thread-safe. Once ThreadMain has been called, this can no longer
    852   // be called.
    853   void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
    854                             const char* const name,
    855                             TraceSampleCallback callback);
    856   // Splits a combined "category\0name" into the two component parts.
    857   static void ExtractCategoryAndName(const char* combined,
    858                                      const char** category,
    859                                      const char** name);
    860   std::vector<TraceBucketData> sample_buckets_;
    861   bool thread_running_;
    862   CancellationFlag cancellation_flag_;
    863   WaitableEvent waitable_event_for_testing_;
    864 };
    865 
    866 
    867 TraceSamplingThread::TraceSamplingThread()
    868     : thread_running_(false),
    869       waitable_event_for_testing_(false, false) {
    870 }
    871 
    872 TraceSamplingThread::~TraceSamplingThread() {
    873 }
    874 
    875 void TraceSamplingThread::ThreadMain() {
    876   PlatformThread::SetName("Sampling Thread");
    877   thread_running_ = true;
    878   const int kSamplingFrequencyMicroseconds = 1000;
    879   while (!cancellation_flag_.IsSet()) {
    880     PlatformThread::Sleep(
    881         TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    882     GetSamples();
    883     waitable_event_for_testing_.Signal();
    884   }
    885 }
    886 
    887 // static
    888 void TraceSamplingThread::DefaultSamplingCallback(
    889     TraceBucketData* bucket_data) {
    890   TRACE_EVENT_API_ATOMIC_WORD category_and_name =
    891       TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
    892   if (!category_and_name)
    893     return;
    894   const char* const combined =
    895       reinterpret_cast<const char* const>(category_and_name);
    896   const char* category_group;
    897   const char* name;
    898   ExtractCategoryAndName(combined, &category_group, &name);
    899   TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
    900       TraceLog::GetCategoryGroupEnabled(category_group),
    901       name, 0, 0, NULL, NULL, NULL, NULL, 0);
    902 }
    903 
    904 void TraceSamplingThread::GetSamples() {
    905   for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    906     TraceBucketData* bucket_data = &sample_buckets_[i];
    907     bucket_data->callback.Run(bucket_data);
    908   }
    909 }
    910 
    911 void TraceSamplingThread::RegisterSampleBucket(
    912     TRACE_EVENT_API_ATOMIC_WORD* bucket,
    913     const char* const name,
    914     TraceSampleCallback callback) {
    915   // Access to sample_buckets_ doesn't race with the sampling thread,
    916   // because RegisterSampleBucket is guaranteed to be called before the
    917   // sampling thread is created.
    918   DCHECK(!thread_running_);
    919   sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
    920 }
    921 
    922 // static
    923 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
    924                                                  const char** category,
    925                                                  const char** name) {
    926   *category = combined;
    927   *name = &combined[strlen(combined) + 1];
    928 }
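        // Illustrative example (not in the original): given the combined string
        // "cc\0DoWork" (category, '\0', name), ExtractCategoryAndName sets
        // *category to "cc" and *name to "DoWork" without copying anything.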
    929 
    930 void TraceSamplingThread::Stop() {
    931   cancellation_flag_.Set();
    932 }
    933 
    934 void TraceSamplingThread::WaitSamplingEventForTesting() {
    935   waitable_event_for_testing_.Wait();
    936 }
    937 
    938 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
    939                                  const char* name,
    940                                  TraceSampleCallback callback)
    941     : bucket(bucket),
    942       bucket_name(name),
    943       callback(callback) {
    944 }
    945 
    946 TraceBucketData::~TraceBucketData() {
    947 }
    948 
    949 ////////////////////////////////////////////////////////////////////////////////
    950 //
    951 // TraceLog
    952 //
    953 ////////////////////////////////////////////////////////////////////////////////
    954 
    955 class TraceLog::ThreadLocalEventBuffer
    956     : public MessageLoop::DestructionObserver {
    957  public:
    958   ThreadLocalEventBuffer(TraceLog* trace_log);
    959   virtual ~ThreadLocalEventBuffer();
    960 
    961   TraceEvent* AddTraceEvent(TraceEventHandle* handle);
    962 
    963   void ReportOverhead(const TimeTicks& event_timestamp,
    964                       const TimeTicks& event_thread_timestamp);
    965 
    966   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    967     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
    968         handle.chunk_index != chunk_index_)
    969       return NULL;
    970 
    971     return chunk_->GetEventAt(handle.event_index);
    972   }
    973 
    974   int generation() const { return generation_; }
    975 
    976  private:
    977   // MessageLoop::DestructionObserver
    978   virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
    979 
    980   void FlushWhileLocked();
    981 
    982   void CheckThisIsCurrentBuffer() const {
    983     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
    984   }
    985 
    986   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
    987   // as long as the thread exists.
    988   TraceLog* trace_log_;
    989   scoped_ptr<TraceBufferChunk> chunk_;
    990   size_t chunk_index_;
    991   int event_count_;
    992   TimeDelta overhead_;
    993   int generation_;
    994 
    995   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
    996 };
    997 
    998 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    999     : trace_log_(trace_log),
   1000       chunk_index_(0),
   1001       event_count_(0),
   1002       generation_(trace_log->generation()) {
   1003   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
   1004   // the following message_loop won't be NULL.
   1005   MessageLoop* message_loop = MessageLoop::current();
   1006   message_loop->AddDestructionObserver(this);
   1007 
   1008   AutoLock lock(trace_log->lock_);
   1009   trace_log->thread_message_loops_.insert(message_loop);
   1010 }
   1011 
   1012 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
   1013   CheckThisIsCurrentBuffer();
   1014   MessageLoop::current()->RemoveDestructionObserver(this);
   1015 
   1016   // Zero event_count_ happens in any of the following cases:
   1017   // - no event generated for the thread;
   1018   // - the thread has no message loop;
   1019   // - trace_event_overhead is disabled.
   1020   if (event_count_) {
   1021     InitializeMetadataEvent(AddTraceEvent(NULL),
   1022                             static_cast<int>(base::PlatformThread::CurrentId()),
   1023                             "overhead", "average_overhead",
   1024                             overhead_.InMillisecondsF() / event_count_);
   1025   }
   1026 
   1027   {
   1028     AutoLock lock(trace_log_->lock_);
   1029     FlushWhileLocked();
   1030     trace_log_->thread_message_loops_.erase(MessageLoop::current());
   1031   }
   1032   trace_log_->thread_local_event_buffer_.Set(NULL);
   1033 }
   1034 
   1035 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
   1036     TraceEventHandle* handle) {
   1037   CheckThisIsCurrentBuffer();
   1038 
   1039   if (chunk_ && chunk_->IsFull()) {
   1040     AutoLock lock(trace_log_->lock_);
   1041     FlushWhileLocked();
   1042     chunk_.reset();
   1043   }
   1044   if (!chunk_) {
   1045     AutoLock lock(trace_log_->lock_);
   1046     chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
   1047     trace_log_->CheckIfBufferIsFullWhileLocked();
   1048   }
   1049   if (!chunk_)
   1050     return NULL;
   1051 
   1052   size_t event_index;
   1053   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
   1054   if (trace_event && handle)
   1055     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
   1056 
   1057   return trace_event;
   1058 }
   1059 
   1060 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
   1061     const TimeTicks& event_timestamp,
   1062     const TimeTicks& event_thread_timestamp) {
   1063   if (!g_category_group_enabled[g_category_trace_event_overhead])
   1064     return;
   1065 
   1066   CheckThisIsCurrentBuffer();
   1067 
   1068   event_count_++;
   1069   TimeTicks thread_now = ThreadNow();
   1070   TimeTicks now = trace_log_->OffsetNow();
   1071   TimeDelta overhead = now - event_timestamp;
   1072   if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
   1073     TraceEvent* trace_event = AddTraceEvent(NULL);
   1074     if (trace_event) {
   1075       trace_event->Initialize(
   1076           static_cast<int>(PlatformThread::CurrentId()),
   1077           event_timestamp, event_thread_timestamp,
   1078           TRACE_EVENT_PHASE_COMPLETE,
   1079           &g_category_group_enabled[g_category_trace_event_overhead],
   1080           "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
   1081       trace_event->UpdateDuration(now, thread_now);
   1082     }
   1083   }
   1084   overhead_ += overhead;
   1085 }
   1086 
   1087 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
   1088   delete this;
   1089 }
   1090 
   1091 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
   1092   if (!chunk_)
   1093     return;
   1094 
   1095   trace_log_->lock_.AssertAcquired();
   1096   if (trace_log_->CheckGeneration(generation_)) {
   1097     // Return the chunk to the buffer only if the generation matches.
   1098     trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
   1099   }
   1100   // Otherwise this method may be called from the destructor, or TraceLog will
   1101   // find the generation mismatch and delete this buffer soon.
   1102 }
   1103 
   1104 // static
   1105 TraceLog* TraceLog::GetInstance() {
   1106   return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
   1107 }
   1108 
   1109 TraceLog::TraceLog()
   1110     : enabled_(false),
   1111       num_traces_recorded_(0),
   1112       event_callback_(0),
   1113       dispatching_to_observer_list_(false),
   1114       process_sort_index_(0),
   1115       process_id_hash_(0),
   1116       process_id_(0),
   1117       watch_category_(0),
   1118       trace_options_(RECORD_UNTIL_FULL),
   1119       sampling_thread_handle_(0),
   1120       category_filter_(CategoryFilter::kDefaultCategoryFilterString),
   1121       event_callback_category_filter_(
   1122           CategoryFilter::kDefaultCategoryFilterString),
   1123       thread_shared_chunk_index_(0),
   1124       generation_(0) {
   1125   // Trace is enabled or disabled on one thread while other threads are
   1126   // accessing the enabled flag. We don't care whether edge-case events are
   1127   // traced or not, so we allow races on the enabled flag to keep the trace
   1128   // macros fast.
   1129   // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
   1130   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
   1131   //                            sizeof(g_category_group_enabled),
   1132   //                           "trace_event category enabled");
   1133   for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
   1134     ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
   1135                          "trace_event category enabled");
   1136   }
   1137 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
   1138   SetProcessID(0);
   1139 #else
   1140   SetProcessID(static_cast<int>(GetCurrentProcId()));
   1141 
   1142   // NaCl also shouldn't access the command line.
   1143   if (CommandLine::InitializedForCurrentProcess() &&
   1144       CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
   1145     std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
   1146         switches::kTraceToConsole);
   1147     if (filter.empty()) {
   1148       filter = kEchoToConsoleCategoryFilter;
   1149     } else {
   1150       filter.append(",");
   1151       filter.append(kEchoToConsoleCategoryFilter);
   1152     }
   1153 
   1154     LOG(ERROR) << "Start " << switches::kTraceToConsole
   1155                << " with CategoryFilter '" << filter << "'.";
   1156     SetEnabled(CategoryFilter(filter), ECHO_TO_CONSOLE);
   1157   }
   1158 #endif
   1159 
   1160   logged_events_.reset(CreateTraceBuffer());
   1161 }
   1162 
   1163 TraceLog::~TraceLog() {
   1164 }
   1165 
   1166 const unsigned char* TraceLog::GetCategoryGroupEnabled(
   1167     const char* category_group) {
   1168   TraceLog* tracelog = GetInstance();
   1169   if (!tracelog) {
   1170     DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
   1171     return &g_category_group_enabled[g_category_already_shutdown];
   1172   }
   1173   return tracelog->GetCategoryGroupEnabledInternal(category_group);
   1174 }
   1175 
   1176 const char* TraceLog::GetCategoryGroupName(
   1177     const unsigned char* category_group_enabled) {
   1178   // Calculate the index of the category group by finding
   1179   // category_group_enabled in g_category_group_enabled array.
   1180   uintptr_t category_begin =
   1181       reinterpret_cast<uintptr_t>(g_category_group_enabled);
   1182   uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
   1183   DCHECK(category_ptr >= category_begin &&
   1184          category_ptr < reinterpret_cast<uintptr_t>(
   1185              g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
   1186       "out of bounds category pointer";
   1187   uintptr_t category_index =
   1188       (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
   1189   return g_category_groups[category_index];
   1190 }
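        // Illustrative example (hedged): if category_group_enabled points at
        // g_category_group_enabled[2], the subtraction above yields index 2 and
        // this returns g_category_groups[2], i.e. "__metadata", per the
        // parallel-array correspondence described where g_category_groups is
        // defined.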
   1191 
   1192 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
   1193   unsigned char enabled_flag = 0;
   1194   const char* category_group = g_category_groups[category_index];
   1195   if (enabled_ && category_filter_.IsCategoryGroupEnabled(category_group))
   1196     enabled_flag |= ENABLED_FOR_RECORDING;
   1197   if (event_callback_ &&
   1198       event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
   1199     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
   1200   g_category_group_enabled[category_index] = enabled_flag;
   1201 }
   1202 
   1203 void TraceLog::UpdateCategoryGroupEnabledFlags() {
   1204   for (int i = 0; i < g_category_index; i++)
   1205     UpdateCategoryGroupEnabledFlag(i);
   1206 }
   1207 
   1208 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
   1209     const char* category_group) {
   1210   DCHECK(!strchr(category_group, '"')) <<
   1211       "Category groups may not contain double quote";
   1212   AutoLock lock(lock_);
   1213 
   1214   unsigned char* category_group_enabled = NULL;
   1215   // Search for pre-existing category group.
   1216   for (int i = 0; i < g_category_index; i++) {
   1217     if (strcmp(g_category_groups[i], category_group) == 0) {
   1218       category_group_enabled = &g_category_group_enabled[i];
   1219       break;
   1220     }
   1221   }
   1222 
   1223   if (!category_group_enabled) {
   1224     // Create a new category group
   1225     DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
   1226         "must increase MAX_CATEGORY_GROUPS";
   1227     if (g_category_index < MAX_CATEGORY_GROUPS) {
   1228       int new_index = g_category_index++;
   1229       // Don't hold on to the category_group pointer, so that we can create
   1230       // category groups with strings not known at compile time (this is
   1231       // required by SetWatchEvent).
   1232       const char* new_group = strdup(category_group);
   1233       ANNOTATE_LEAKING_OBJECT_PTR(new_group);
   1234       g_category_groups[new_index] = new_group;
   1235       DCHECK(!g_category_group_enabled[new_index]);
   1236       // Note that if both included and excluded patterns in the
   1237       // CategoryFilter are empty, we exclude nothing,
   1238       // thereby enabling this category group.
   1239       UpdateCategoryGroupEnabledFlag(new_index);
   1240       category_group_enabled = &g_category_group_enabled[new_index];
   1241     } else {
   1242       category_group_enabled =
   1243           &g_category_group_enabled[g_category_categories_exhausted];
   1244     }
   1245   }
   1246   return category_group_enabled;
   1247 }
   1248 
   1249 void TraceLog::GetKnownCategoryGroups(
   1250     std::vector<std::string>* category_groups) {
   1251   AutoLock lock(lock_);
   1252   category_groups->push_back(
   1253       g_category_groups[g_category_trace_event_overhead]);
   1254   for (int i = g_num_builtin_categories; i < g_category_index; i++)
   1255     category_groups->push_back(g_category_groups[i]);
   1256 }
   1257 
   1258 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
   1259                           Options options) {
   1260   std::vector<EnabledStateObserver*> observer_list;
   1261   {
   1262     AutoLock lock(lock_);
   1263 
   1264     // Can't enable tracing when Flush() is in progress.
   1265     DCHECK(!flush_message_loop_proxy_.get());
   1266 
   1267     Options old_options = trace_options();
   1268 
   1269     if (enabled_) {
   1270       if (options != old_options) {
   1271         DLOG(ERROR) << "Attempting to re-enable tracing with a different "
   1272                     << "set of options.";
   1273       }
   1274 
   1275       category_filter_.Merge(category_filter);
   1276       UpdateCategoryGroupEnabledFlags();
   1277       return;
   1278     }
   1279 
   1280     if (dispatching_to_observer_list_) {
   1281       DLOG(ERROR) <<
   1282           "Cannot manipulate TraceLog::Enabled state from an observer.";
   1283       return;
   1284     }
   1285 
   1286     enabled_ = true;
   1287 
   1288     if (options != old_options) {
   1289       subtle::NoBarrier_Store(&trace_options_, options);
   1290       UseNextTraceBuffer();
   1291     }
   1292 
   1293     num_traces_recorded_++;
   1294 
   1295     category_filter_ = CategoryFilter(category_filter);
   1296     UpdateCategoryGroupEnabledFlags();
   1297 
   1298     if ((options & ENABLE_SAMPLING) || (options & MONITOR_SAMPLING)) {
   1299       sampling_thread_.reset(new TraceSamplingThread);
   1300       sampling_thread_->RegisterSampleBucket(
   1301           &g_trace_state[0],
   1302           "bucket0",
   1303           Bind(&TraceSamplingThread::DefaultSamplingCallback));
   1304       sampling_thread_->RegisterSampleBucket(
   1305           &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
            0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}

void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!enabled_)
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  enabled_ = false;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!enabled_)
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}

float TraceLog::GetBufferPercentFull() const {
  AutoLock lock(lock_);
  return static_cast<float>(static_cast<double>(logged_events_->Size()) /
                            logged_events_->Capacity());
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceBuffer* TraceLog::CreateTraceBuffer() {
  Options options = trace_options();
  if (options & RECORD_CONTINUOUSLY)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if (options & MONITOR_SAMPLING)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & ECHO_TO_CONSOLE)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  return new TraceBufferVector();
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull())
    SetDisabledWhileLocked();
}

void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}

// Flush() works as follows:
// 1. Flush() is called on thread A, whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush its thread-local buffer; otherwise the flush
//    finishes immediately;
// 3. FlushCurrentThread() deletes the thread-local event buffer:
//    - the thread's last batch of events is flushed into the main buffer;
//    - the message loop is removed from thread_message_loops_;
//    if this was the last message loop, the flush finishes;
// 4. If any thread hasn't finished its flush in time, OnFlushTimeout()
//    finishes the flush anyway.
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms, causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
           thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        (*it)->PostTask(
            FROM_HERE,
            Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
      }
      flush_message_loop_proxy_->PostDelayedTask(
          FROM_HERE,
          Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
          TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
      return;
    }
  }

  FinishFlush(generation);
}
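
// A minimal usage sketch (illustrative only; OnTraceDataCollected is a
// hypothetical callback, not part of this file):
//
//   void OnTraceDataCollected(const scoped_refptr<RefCountedString>& json,
//                             bool has_more_events) {
//     // Consume json->data(); the flush is complete once has_more_events
//     // is false.
//   }
//
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(Bind(&OnTraceDataCollected));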

void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know that the flush has completed.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      if (!chunk) {
        has_more_events = false;
        break;
      }
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (i > 0 || j > 0)
          json_events_str_ptr->data().append(",");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }

    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}

void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

// Runs in each thread that holds a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}

void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flushing in time. "
        "If this happens consistently for a thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "that thread to avoid losing its trace events.";
    for (hash_set<MessageLoop*>::const_iterator it =
         thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}

void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
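
// Bumping |generation_| invalidates ThreadLocalEventBuffers created for the
// previous tracing session: the CheckGeneration() tests elsewhere in this file
// turn late flush tasks and stale thread-local buffers into no-ops.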

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks now = OffsetTimestamp(timestamp);
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or whose message loop may be blocked,
  // trace events are added directly into the main buffer.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
  if ((*category_group_enabled & ENABLED_FOR_RECORDING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
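
// Example of the message produced above (illustrative; ANSI color escapes
// elided, thread name and event are hypothetical):
//   CrBrowserMain: | | MessageLoop::RunTask (2.345 ms)
// Each "| " marks one BEGIN event still unfinished on the thread.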

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
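
// Illustrative use of the watch-event API (OnSwapTraced is a hypothetical
// callback):
//   TraceLog::GetInstance()->SetWatchEvent(
//       "gpu", "SwapBuffers", Bind(&OnSwapTraced));
// OnSwapTraced then runs each time a "SwapBuffers" event is added while the
// "gpu" category group is enabled, until CancelWatchEvent() is called.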

void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); ++it) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); ++it) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); ++it) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }
}
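
// In the serialized trace these become metadata records (for example a
// "process_name" event whose "name" argument carries process_name_), which
// trace viewers use to label and order processes and threads.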

void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread-local buffer.
  // Try to get it from the main buffer with the lock held.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
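
// The lookup order above mirrors where an event may live: the calling
// thread's local buffer first (no lock needed), then the thread-shared chunk,
// and finally the main buffer (both under the lock).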

void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create an FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
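
// The hash is XORed into ids that carry TRACE_EVENT_FLAG_MANGLE_ID (see
// AddTraceEventWithThreadIdAndTimestamp), so numerically equal ids emitted by
// different processes stay distinct when traces are merged.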

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
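
// Threads whose message loop may block should call the method above once;
// their subsequent events then bypass the thread-local buffer (see the
// thread_blocks_message_loop_ check in AddTraceEventWithThreadIdAndTimestamp),
// so flushes never have to wait on them.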

bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(category_group,
                          category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}

CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  return *this;
}

void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize the list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_, or
  // disabled_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Excluded categories start with '-'.
    if (category.at(0) == '-') {
      // Remove '-' from the category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
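
// For example (illustrative), the filter string
//   "cat1,-cat2,disabled-by-default-gpu"
// yields included_ = {"cat1"}, excluded_ = {"cat2"} (the leading '-' is
// stripped), and disabled_ = {"disabled-by-default-gpu"}.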

void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  return filter_string;
}

bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include disabled categories.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded and there are no included patterns,
  // we consider it enabled.
  return included_.empty();
}
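
// For example (illustrative), with included_ = {"cat1"} and
// excluded_ = {"cat2"}, the category group "cat1,cat2" is enabled: the
// disabled_ checks fail, and the "cat1" token matches an included pattern
// before "cat2" is ever tested against excluded_.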

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broader filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
}

}  // namespace debug
}  // namespace base

namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}
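
// Together, the constructor/destructor pair emits a single COMPLETE event:
// the constructor records the begin timestamp and the destructor back-fills
// the duration via TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION.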

}  // namespace trace_event_internal