// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/statistics_recorder.h"

#include <algorithm>
#include <list>
#include <memory>

#include "base/at_exit.h"
#include "base/debug/leak_annotations.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/values.h"

namespace {

// Initialize histogram statistics gathering system.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;

bool HistogramNameLesser(const base::HistogramBase* a,
                         const base::HistogramBase* b) {
  return a->histogram_name() < b->histogram_name();
}

}  // namespace

namespace base {

StatisticsRecorder::HistogramIterator::HistogramIterator(
    const HistogramMap::iterator& iter, bool include_persistent)
    : iter_(iter),
      include_persistent_(include_persistent) {
  // The starting location could point to a persistent histogram when such
  // is not wanted. If so, skip it.
  if (!include_persistent_ && iter_ != histograms_->end() &&
      (iter_->second->flags() & HistogramBase::kIsPersistent)) {
    // This operator will continue to skip until a non-persistent histogram
    // is found.
    operator++();
  }
}

StatisticsRecorder::HistogramIterator::HistogramIterator(
    const HistogramIterator& rhs)
    : iter_(rhs.iter_),
      include_persistent_(rhs.include_persistent_) {
}

StatisticsRecorder::HistogramIterator::~HistogramIterator() {}

StatisticsRecorder::HistogramIterator&
StatisticsRecorder::HistogramIterator::operator++() {
  const HistogramMap::iterator histograms_end = histograms_->end();
  if (iter_ == histograms_end || lock_ == NULL)
    return *this;

  base::AutoLock auto_lock(*lock_);

  for (;;) {
    ++iter_;
    if (iter_ == histograms_end)
      break;
    if (!include_persistent_ && (iter_->second->flags() &
                                 HistogramBase::kIsPersistent)) {
      continue;
    }
    break;
  }

  return *this;
}

StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(lock_);
  DCHECK(histograms_);
  DCHECK(ranges_);

  // Clean out what this object created and then restore what existed before.
  Reset();
  base::AutoLock auto_lock(*lock_);
  histograms_ = existing_histograms_.release();
  callbacks_ = existing_callbacks_.release();
  ranges_ = existing_ranges_.release();
}

// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  g_statistics_recorder_.Get();
}

// static
bool StatisticsRecorder::IsActive() {
  if (lock_ == NULL)
    return false;
  base::AutoLock auto_lock(*lock_);
  return NULL != histograms_;
}
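
// Example (sketch): histograms normally reach RegisterOrDeleteDuplicate()
// below via a histogram factory rather than being registered directly. The
// metric name and bucket layout here are arbitrary placeholder values.
//
//   base::StatisticsRecorder::Initialize();
//   base::HistogramBase* counts = base::Histogram::FactoryGet(
//       "MyMetric", /*minimum=*/1, /*maximum=*/100, /*bucket_count=*/50,
//       base::HistogramBase::kUmaTargetedHistogramFlag);
//   counts->Add(42);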

// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
    HistogramBase* histogram) {
  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only
  // once for an object, the duplicates should not be annotated.
  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
  // twice if (lock_ == NULL) || (!histograms_).
  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
    return histogram;
  }

  HistogramBase* histogram_to_delete = NULL;
  HistogramBase* histogram_to_return = NULL;
  {
    base::AutoLock auto_lock(*lock_);
    if (histograms_ == NULL) {
      histogram_to_return = histogram;
    } else {
      const std::string& name = histogram->histogram_name();
      HistogramMap::iterator it = histograms_->find(name);
      if (histograms_->end() == it) {
        // The StringKey references the name within |histogram| rather than
        // making a copy.
        (*histograms_)[name] = histogram;
        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
        // If there are callbacks for this histogram, we set the
        // kCallbackExists flag.
        auto callback_iterator = callbacks_->find(name);
        if (callback_iterator != callbacks_->end()) {
          if (!callback_iterator->second.is_null())
            histogram->SetFlags(HistogramBase::kCallbackExists);
          else
            histogram->ClearFlags(HistogramBase::kCallbackExists);
        }
        histogram_to_return = histogram;
      } else if (histogram == it->second) {
        // The histogram was registered before.
        histogram_to_return = histogram;
      } else {
        // We already have one histogram with this name.
        DCHECK_EQ(histogram->histogram_name(),
                  it->second->histogram_name()) << "hash collision";
        histogram_to_return = it->second;
        histogram_to_delete = histogram;
      }
    }
  }
  delete histogram_to_delete;
  return histogram_to_return;
}

// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
    const BucketRanges* ranges) {
  DCHECK(ranges->HasValidChecksum());
  std::unique_ptr<const BucketRanges> ranges_deleter;

  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  std::list<const BucketRanges*>* checksum_matching_list;
  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
  if (ranges_->end() == ranges_it) {
    // Add a new matching list to map.
    checksum_matching_list = new std::list<const BucketRanges*>();
    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
    (*ranges_)[ranges->checksum()] = checksum_matching_list;
  } else {
    checksum_matching_list = ranges_it->second;
  }

  for (const BucketRanges* existing_ranges : *checksum_matching_list) {
    if (existing_ranges->Equals(ranges)) {
      if (existing_ranges == ranges) {
        return ranges;
      } else {
        ranges_deleter.reset(ranges);
        return existing_ranges;
      }
    }
  }
  // We haven't found a BucketRanges which has the same ranges. Register the
  // new BucketRanges.
  checksum_matching_list->push_front(ranges);
  return ranges;
}
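
// Example (sketch): the reporting helpers below can be used to dump the
// currently registered histograms; an empty query selects all of them.
//
//   std::string ascii;
//   base::StatisticsRecorder::WriteGraph(std::string(), &ascii);
//   std::string json = base::StatisticsRecorder::ToJSON(std::string());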

// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
                                        std::string* output) {
  if (!IsActive())
    return;

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
  for (const HistogramBase* histogram : snapshot) {
    histogram->WriteHTMLGraph(output);
    output->append("<br><hr><br>");
  }
}

// static
void StatisticsRecorder::WriteGraph(const std::string& query,
                                    std::string* output) {
  if (!IsActive())
    return;
  if (query.length())
    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
  else
    output->append("Collections of all histograms\n");

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
  for (const HistogramBase* histogram : snapshot) {
    histogram->WriteAscii(output);
    output->append("\n");
  }
}

// static
std::string StatisticsRecorder::ToJSON(const std::string& query) {
  if (!IsActive())
    return std::string();

  std::string output("{");
  if (!query.empty()) {
    output += "\"query\":";
    EscapeJSONString(query, true, &output);
    output += ",";
  }

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  output += "\"histograms\":[";
  bool first_histogram = true;
  for (const HistogramBase* histogram : snapshot) {
    if (first_histogram)
      first_histogram = false;
    else
      output += ",";
    std::string json;
    histogram->WriteJSON(&json);
    output += json;
  }
  output += "]}";
  return output;
}

// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (const auto& entry : *histograms_) {
    output->push_back(entry.second);
  }
}

// static
void StatisticsRecorder::GetBucketRanges(
    std::vector<const BucketRanges*>* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL)
    return;

  for (const auto& entry : *ranges_) {
    for (auto* range_entry : *entry.second) {
      output->push_back(range_entry);
    }
  }
}
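
// Example (sketch): FindHistogram() below performs an exact-name lookup,
// while GetSnapshot() matches any histogram whose name contains the query
// substring. "MyMetric" is an arbitrary placeholder name.
//
//   base::HistogramBase* found =
//       base::StatisticsRecorder::FindHistogram("MyMetric");
//   if (found)
//     found->Add(1);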

// static
HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
  // This must be called *before* the lock is acquired below because it will
  // call back into this object to register histograms. Those called methods
  // will acquire the lock at that time.
  ImportGlobalPersistentHistograms();

  if (lock_ == NULL)
    return NULL;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return NULL;

  HistogramMap::iterator it = histograms_->find(name);
  if (histograms_->end() == it)
    return NULL;
  return it->second;
}

// static
StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
    bool include_persistent) {
  DCHECK(histograms_);
  ImportGlobalPersistentHistograms();

  HistogramMap::iterator iter_begin;
  {
    base::AutoLock auto_lock(*lock_);
    iter_begin = histograms_->begin();
  }
  return HistogramIterator(iter_begin, include_persistent);
}

// static
StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
  HistogramMap::iterator iter_end;
  {
    base::AutoLock auto_lock(*lock_);
    iter_end = histograms_->end();
  }
  return HistogramIterator(iter_end, true);
}

// static
void StatisticsRecorder::InitLogOnShutdown() {
  if (lock_ == nullptr)
    return;
  base::AutoLock auto_lock(*lock_);
  g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
}

// static
void StatisticsRecorder::GetSnapshot(const std::string& query,
                                     Histograms* snapshot) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (const auto& entry : *histograms_) {
    if (entry.second->histogram_name().find(query) != std::string::npos)
      snapshot->push_back(entry.second);
  }
}
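
// Example (sketch): SetCallback() below registers a per-histogram observer
// that the histogram runs when new samples are added; it can be removed
// again with ClearCallback(). OnMyMetricSample is a hypothetical free
// function taking a HistogramBase::Sample.
//
//   bool ok = base::StatisticsRecorder::SetCallback(
//       "MyMetric", base::Bind(&OnMyMetricSample));
//   ...
//   base::StatisticsRecorder::ClearCallback("MyMetric");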

// static
bool StatisticsRecorder::SetCallback(
    const std::string& name,
    const StatisticsRecorder::OnSampleCallback& cb) {
  DCHECK(!cb.is_null());
  if (lock_ == NULL)
    return false;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return false;

  if (ContainsKey(*callbacks_, name))
    return false;
  callbacks_->insert(std::make_pair(name, cb));

  auto it = histograms_->find(name);
  if (it != histograms_->end())
    it->second->SetFlags(HistogramBase::kCallbackExists);

  return true;
}

// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  callbacks_->erase(name);

  // We also clear the flag from the histogram (if it exists).
  auto it = histograms_->find(name);
  if (it != histograms_->end())
    it->second->ClearFlags(HistogramBase::kCallbackExists);
}

// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
    const std::string& name) {
  if (lock_ == NULL)
    return OnSampleCallback();
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return OnSampleCallback();

  auto callback_iterator = callbacks_->find(name);
  return callback_iterator != callbacks_->end() ? callback_iterator->second
                                                : OnSampleCallback();
}

// static
size_t StatisticsRecorder::GetHistogramCount() {
  if (!lock_)
    return 0;

  base::AutoLock auto_lock(*lock_);
  if (!histograms_)
    return 0;
  return histograms_->size();
}

// static
void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
  if (histograms_)
    histograms_->erase(name);
}

// static
std::unique_ptr<StatisticsRecorder>
StatisticsRecorder::CreateTemporaryForTesting() {
  return WrapUnique(new StatisticsRecorder());
}

// static
void StatisticsRecorder::UninitializeForTesting() {
  // Stop now if it's never been initialized.
  if (lock_ == NULL || histograms_ == NULL)
    return;

  // Get the global instance and destruct it. It's held in static memory so
  // can't "delete" it; call the destructor explicitly.
  DCHECK(g_statistics_recorder_.private_instance_);
  g_statistics_recorder_.Get().~StatisticsRecorder();

  // Now the ugly part. There's no official way to release a LazyInstance once
  // created so it's necessary to clear out an internal variable which
  // shouldn't be publicly visible but is for initialization reasons.
  g_statistics_recorder_.private_instance_ = 0;
}
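
// Example (sketch): tests that need an isolated registry can swap in a
// temporary recorder for the duration of the test; destroying it restores
// the previous global state (see the destructor above).
//
//   std::unique_ptr<base::StatisticsRecorder> recorder =
//       base::StatisticsRecorder::CreateTemporaryForTesting();
//   // ... register histograms and run assertions ...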

// static
void StatisticsRecorder::ImportGlobalPersistentHistograms() {
  if (lock_ == NULL)
    return;

  // Import histograms from known persistent storage. Histograms could have
  // been added by other processes and they must be fetched and recognized
  // locally. If the persistent memory segment is not shared between processes,
  // this call does nothing.
  GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
  if (allocator)
    allocator->ImportHistogramsToStatisticsRecorder();
}

// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock get called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }

  base::AutoLock auto_lock(*lock_);

  existing_histograms_.reset(histograms_);
  existing_callbacks_.reset(callbacks_);
  existing_ranges_.reset(ranges_);

  histograms_ = new HistogramMap;
  callbacks_ = new CallbackMap;
  ranges_ = new RangesMap;

  InitLogOnShutdownWithoutLock();
}

void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
  if (!vlog_initialized_ && VLOG_IS_ON(1)) {
    vlog_initialized_ = true;
    AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
  }
}

// static
void StatisticsRecorder::Reset() {
  // If there's no lock then there is nothing to reset.
  if (!lock_)
    return;

  std::unique_ptr<HistogramMap> histograms_deleter;
  std::unique_ptr<CallbackMap> callbacks_deleter;
  std::unique_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    histograms_deleter.reset(histograms_);
    callbacks_deleter.reset(callbacks_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    callbacks_ = NULL;
    ranges_ = NULL;
  }
  // The maps themselves are deleted when the deleters go out of scope, but
  // the histogram and range objects they pointed to are intentionally leaked.
}

// static
void StatisticsRecorder::DumpHistogramsToVlog(void* /*instance*/) {
  std::string output;
  StatisticsRecorder::WriteGraph(std::string(), &output);
  VLOG(1) << output;
}

// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;

}  // namespace base