// base/metrics/statistics_recorder.cc
      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/metrics/statistics_recorder.h"
      6 
      7 #include "base/at_exit.h"
      8 #include "base/debug/leak_annotations.h"
      9 #include "base/json/string_escape.h"
     10 #include "base/logging.h"
     11 #include "base/memory/scoped_ptr.h"
     12 #include "base/metrics/histogram.h"
     13 #include "base/strings/stringprintf.h"
     14 #include "base/synchronization/lock.h"
     15 #include "base/values.h"
     16 
     17 using std::list;
     18 using std::string;
     19 
namespace {
// Initialize histogram statistics gathering system.  Leaky: the recorder is
// never destroyed at shutdown (metrics objects in this file are intentionally
// leaked; see crbug.com/79322 in RegisterOrDeleteDuplicate below).
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;
}  // namespace
     25 
     26 namespace base {
     27 
// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  // The constructor allocates lock_, histograms_ and ranges_, after which
  // IsActive() returns true.
  g_statistics_recorder_.Get();
}
     33 
     34 
     35 // static
     36 bool StatisticsRecorder::IsActive() {
     37   if (lock_ == NULL)
     38     return false;
     39   base::AutoLock auto_lock(*lock_);
     40   return NULL != histograms_;
     41 }
     42 
     43 // static
     44 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
     45     HistogramBase* histogram) {
     46   // As per crbug.com/79322 the histograms are intentionally leaked, so we need
     47   // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
     48   // for an object, the duplicates should not be annotated.
     49   // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
     50   // twice if (lock_ == NULL) || (!histograms_).
     51   if (lock_ == NULL) {
     52     ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     53     return histogram;
     54   }
     55 
     56   HistogramBase* histogram_to_delete = NULL;
     57   HistogramBase* histogram_to_return = NULL;
     58   {
     59     base::AutoLock auto_lock(*lock_);
     60     if (histograms_ == NULL) {
     61       histogram_to_return = histogram;
     62     } else {
     63       const string& name = histogram->histogram_name();
     64       HistogramMap::iterator it = histograms_->find(name);
     65       if (histograms_->end() == it) {
     66         (*histograms_)[name] = histogram;
     67         ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     68         histogram_to_return = histogram;
     69       } else if (histogram == it->second) {
     70         // The histogram was registered before.
     71         histogram_to_return = histogram;
     72       } else {
     73         // We already have one histogram with this name.
     74         histogram_to_return = it->second;
     75         histogram_to_delete = histogram;
     76       }
     77     }
     78   }
     79   delete histogram_to_delete;
     80   return histogram_to_return;
     81 }
     82 
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
    const BucketRanges* ranges) {
  DCHECK(ranges->HasValidChecksum());
  // Declared before the AutoLock below so that, when |ranges| turns out to be
  // a duplicate, its deletion happens only after the lock is released.
  scoped_ptr<const BucketRanges> ranges_deleter;

  if (lock_ == NULL) {
    // Recorder was never constructed; intentionally leak |ranges| so the
    // caller may keep using it (same policy as RegisterOrDeleteDuplicate).
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL) {
    // Recorder already torn down; same leak-on-purpose policy as above.
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  // |ranges_| maps checksum -> list of BucketRanges sharing that checksum.
  list<const BucketRanges*>* checksum_matching_list;
  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
  if (ranges_->end() == ranges_it) {
    // Add a new matching list to map.
    checksum_matching_list = new list<const BucketRanges*>();
    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
    (*ranges_)[ranges->checksum()] = checksum_matching_list;
  } else {
    checksum_matching_list = ranges_it->second;
  }

  // Scan the list for a BucketRanges with identical contents (a checksum
  // match alone is not sufficient; Equals() compares the actual ranges).
  list<const BucketRanges*>::iterator checksum_matching_list_it;
  for (checksum_matching_list_it = checksum_matching_list->begin();
       checksum_matching_list_it != checksum_matching_list->end();
       ++checksum_matching_list_it) {
    const BucketRanges* existing_ranges = *checksum_matching_list_it;
    if (existing_ranges->Equals(ranges)) {
      if (existing_ranges == ranges) {
        // The exact same object was registered before.
        return ranges;
      } else {
        // An equivalent object is already registered: keep it and schedule
        // |ranges| for deletion (performed by |ranges_deleter| after unlock).
        ranges_deleter.reset(ranges);
        return existing_ranges;
      }
    }
  }
  // We haven't found a BucketRanges which has the same ranges. Register the
  // new BucketRanges.
  checksum_matching_list->push_front(ranges);
  return ranges;
}
    130 
    131 // static
    132 void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
    133                                         std::string* output) {
    134   if (!IsActive())
    135     return;
    136 
    137   Histograms snapshot;
    138   GetSnapshot(query, &snapshot);
    139   for (Histograms::iterator it = snapshot.begin();
    140        it != snapshot.end();
    141        ++it) {
    142     (*it)->WriteHTMLGraph(output);
    143     output->append("<br><hr><br>");
    144   }
    145 }
    146 
    147 // static
    148 void StatisticsRecorder::WriteGraph(const std::string& query,
    149                                     std::string* output) {
    150   if (!IsActive())
    151     return;
    152   if (query.length())
    153     StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
    154   else
    155     output->append("Collections of all histograms\n");
    156 
    157   Histograms snapshot;
    158   GetSnapshot(query, &snapshot);
    159   for (Histograms::iterator it = snapshot.begin();
    160        it != snapshot.end();
    161        ++it) {
    162     (*it)->WriteAscii(output);
    163     output->append("\n");
    164   }
    165 }
    166 
    167 // static
    168 std::string StatisticsRecorder::ToJSON(const std::string& query) {
    169   if (!IsActive())
    170     return std::string();
    171 
    172   std::string output("{");
    173   if (!query.empty()) {
    174     output += "\"query\":";
    175     EscapeJSONString(query, true, &output);
    176     output += ",";
    177   }
    178 
    179   Histograms snapshot;
    180   GetSnapshot(query, &snapshot);
    181   output += "\"histograms\":[";
    182   bool first_histogram = true;
    183   for (Histograms::const_iterator it = snapshot.begin(); it != snapshot.end();
    184        ++it) {
    185     if (first_histogram)
    186       first_histogram = false;
    187     else
    188       output += ",";
    189     std::string json;
    190     (*it)->WriteJSON(&json);
    191     output += json;
    192   }
    193   output += "]}";
    194   return output;
    195 }
    196 
    197 // static
    198 void StatisticsRecorder::GetHistograms(Histograms* output) {
    199   if (lock_ == NULL)
    200     return;
    201   base::AutoLock auto_lock(*lock_);
    202   if (histograms_ == NULL)
    203     return;
    204 
    205   for (HistogramMap::iterator it = histograms_->begin();
    206        histograms_->end() != it;
    207        ++it) {
    208     DCHECK_EQ(it->first, it->second->histogram_name());
    209     output->push_back(it->second);
    210   }
    211 }
    212 
    213 // static
    214 void StatisticsRecorder::GetBucketRanges(
    215     std::vector<const BucketRanges*>* output) {
    216   if (lock_ == NULL)
    217     return;
    218   base::AutoLock auto_lock(*lock_);
    219   if (ranges_ == NULL)
    220     return;
    221 
    222   for (RangesMap::iterator it = ranges_->begin();
    223        ranges_->end() != it;
    224        ++it) {
    225     list<const BucketRanges*>* ranges_list = it->second;
    226     list<const BucketRanges*>::iterator ranges_list_it;
    227     for (ranges_list_it = ranges_list->begin();
    228          ranges_list_it != ranges_list->end();
    229          ++ranges_list_it) {
    230       output->push_back(*ranges_list_it);
    231     }
    232   }
    233 }
    234 
    235 // static
    236 HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
    237   if (lock_ == NULL)
    238     return NULL;
    239   base::AutoLock auto_lock(*lock_);
    240   if (histograms_ == NULL)
    241     return NULL;
    242 
    243   HistogramMap::iterator it = histograms_->find(name);
    244   if (histograms_->end() == it)
    245     return NULL;
    246   return it->second;
    247 }
    248 
    249 // private static
    250 void StatisticsRecorder::GetSnapshot(const std::string& query,
    251                                      Histograms* snapshot) {
    252   if (lock_ == NULL)
    253     return;
    254   base::AutoLock auto_lock(*lock_);
    255   if (histograms_ == NULL)
    256     return;
    257 
    258   for (HistogramMap::iterator it = histograms_->begin();
    259        histograms_->end() != it;
    260        ++it) {
    261     if (it->first.find(query) != std::string::npos)
    262       snapshot->push_back(it->second);
    263   }
    264 }
    265 
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe.  It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  DCHECK(!histograms_);
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock get called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }
  // Allocate the registries under the lock; once these are non-NULL the
  // recorder counts as active (see IsActive()).
  base::AutoLock auto_lock(*lock_);
  histograms_ = new HistogramMap;
  ranges_ = new RangesMap;

  // Only register the dump callback when verbose logging is enabled at
  // construction time; DumpHistogramsToVlog DCHECKs this precondition.
  if (VLOG_IS_ON(1))
    AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
}
    287 
    288 // static
    289 void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
    290   DCHECK(VLOG_IS_ON(1));
    291 
    292   StatisticsRecorder* me = reinterpret_cast<StatisticsRecorder*>(instance);
    293   string output;
    294   me->WriteGraph(std::string(), &output);
    295   VLOG(1) << output;
    296 }
    297 
StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(histograms_ && ranges_ && lock_);

  // Clean up.
  // The deleters are declared before the AutoLock scope below so that the
  // maps are destroyed only after the lock has been released.
  scoped_ptr<HistogramMap> histograms_deleter;
  scoped_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    // Reset the statics first so concurrent readers see an inactive recorder.
    histograms_deleter.reset(histograms_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    ranges_ = NULL;
  }
  // We are going to leak the histograms and the ranges (only the map
  // containers are freed above; the pointed-to objects are not).
}
    315 
    316 
// Definitions of the static data members.  All start NULL and are lazily
// allocated by the StatisticsRecorder constructor; lock_ is never deleted
// (see the constructor comment).
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;
    323 
    324 }  // namespace base
    325