// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

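// A fixed-capacity trace buffer that recycles its chunks once they have all
// been handed out. Recyclable chunk indices live in a circular queue:
// GetChunk() pops an index from the head and marks that slot as in flight,
// ReturnChunk() pushes the index back at the tail, so when tracing wraps
// around, the oldest returned chunks are reused first.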
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  explicit TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_[*index] = nullptr;
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

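  // Resolves a TraceEventHandle only while the chunk it points at still has
  // the same sequence number; a recycled chunk gets a fresh seq in GetChunk(),
  // so stale handles return nullptr instead of aliasing a reused slot.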
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

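  // Walks the recyclable queue from current_iteration_index_ (kept equal to
  // queue_head_ by GetChunk()) towards the tail, so a flush visits the
  // returned chunks from oldest to newest.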
  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

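  // Illustrative walk-through of the queue bookkeeping, assuming
  // max_chunks_ == 2 (so queue_capacity() == 3): the constructor sets
  // queue_head_ == 0 and queue_tail_ == 2 with indices {0, 1} queued, i.e.
  // QueueSize() == 2 and QueueIsFull() is true. Two GetChunk() calls advance
  // queue_head_ to 2, making the queue empty (head == tail). Returning both
  // chunks wraps queue_tail_ around to 1 and the queue is full again. The
  // extra slot is what lets head == tail unambiguously mean "empty".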
  size_t queue_capacity() const {
    // One extra slot helps distinguish the full state from the empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

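// A grow-only trace buffer: a slot is appended for every chunk handed out and
// is never recycled, so once chunks_.size() reaches max_chunks_, IsFull()
// reports true to the caller. In-flight chunks are represented by nullptr
// entries in |chunks_|.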
class TraceBufferVector : public TraceBuffer {
 public:
  explicit TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::unique_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

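  // Counts the pointer vector itself: the "allocated" figure covers the
  // reserved capacity of max_chunks_ pointers, while "resident" covers only
  // the pointers currently in use. The chunks' own memory is added per chunk
  // below.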
  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace

TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() {}

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

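// Builds the chunk's overhead estimate incrementally: events measured on a
// previous call stay in |cached_overhead_estimate_|, so each call only walks
// the events added since then. Once the chunk is full, the cached estimate is
// complete and is reported as-is on subsequent calls.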
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

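// TraceResultBuffer stitches flushed JSON fragments into one JSON array.
// Illustrative use (this surrounding snippet is a sketch, not taken from this
// file):
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer buffer;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();                        // emits "["
//   buffer.AddFragment("{\"ph\":\"B\"}");  // fragments are comma-separated
//   buffer.Finish();                       // emits "]"
//   // output.json_output now holds "[{\"ph\":\"B\"}]".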
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() {}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

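// Factory functions selecting the buffering policy. Illustrative caller-side
// use (the wrapping code is a sketch, not taken from this file):
//   std::unique_ptr<TraceBuffer> buffer(
//       TraceBuffer::CreateTraceBufferRingBuffer(1024));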
TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base