// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <math.h>
#include <stdlib.h>

#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/port.h"
#include "base/process/process_handle.h"
#include "base/profiler/alternate_timer.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/thread_restrictions.h"

using base::TimeDelta;

namespace tracked_objects {

namespace {
// Flag to compile out almost all of the task tracking code.
const bool kTrackAllTaskObjects = true;

// TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is
// negligible, enable by default.
// Flag to compile out parent-child link recording.
const bool kTrackParentChildLinks = false;

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed.  If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState =
    ThreadData::PROFILING_CHILDREN_ACTIVE;

// Control whether an alternate time source (Now() function) is supported by
// the ThreadData class.  This compile time flag should be set to true if we
// want other modules (such as a memory allocator, or a thread-specific CPU
// time clock) to be able to provide a thread-specific Now() function.
// Without this compile-time flag, the code will only support the wall-clock
// time.  This flag can be flipped to efficiently disable this path (if there
// is a performance problem with its presence).
static const bool kAllowAlternateTimeSourceHandling = true;

}  // namespace

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData() {
  Clear();
}

DeathData::DeathData(int count) {
  Clear();
  count_ = count;
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//             if (assign_it)
//               target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it))
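// Illustration of the bit trick: -static_cast<int32>(assign_it) is all
// one-bits when |assign_it| is true, and all zero-bits when it is false, so
//   int32 target = 5;
//   CONDITIONAL_ASSIGN(true, target, 9);   // target ^= (5 ^ 9) & ~0  ==> 9
//   CONDITIONAL_ASSIGN(false, target, 9);  // target ^= (5 ^ 9) & 0   ==> 5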
void DeathData::RecordDeath(const int32 queue_duration,
                            const int32 run_duration,
                            int32 random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    ++count_;
  queue_duration_sum_ += queue_duration;
  run_duration_sum_ += run_duration;

  if (queue_duration_max_ < queue_duration)
    queue_duration_max_ = queue_duration;
  if (run_duration_max_ < run_duration)
    run_duration_max_ = run_duration;

  // Take a uniformly distributed sample over all durations ever supplied.
  // The probability that we (instead) use this new sample is 1/count_.  This
  // results in a completely uniform selection of the sample (at least when we
  // don't clamp count_... but that should be inconsequentially likely).
  // We ignore the fact that we correlated our selection of a sample to the run
  // and queue times (i.e., we used them to generate random_number).
  CHECK_GT(count_, 0);
  if (0 == (random_number % count_)) {
    queue_duration_sample_ = queue_duration;
    run_duration_sample_ = run_duration;
  }
}
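// The sampling above is size-one reservoir sampling: the Nth death replaces
// the stored sample when random_number % N == 0, i.e. with probability 1/N,
// and a previously kept sample survives that step with probability (N - 1)/N.
// By induction, the kth of N deaths is retained with probability
//   (1/k) * (k/(k+1)) * ... * ((N-1)/N) = 1/N,
// so every death is an equally likely sample.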
int DeathData::count() const { return count_; }

int32 DeathData::run_duration_sum() const { return run_duration_sum_; }

int32 DeathData::run_duration_max() const { return run_duration_max_; }

int32 DeathData::run_duration_sample() const {
  return run_duration_sample_;
}

int32 DeathData::queue_duration_sum() const {
  return queue_duration_sum_;
}

int32 DeathData::queue_duration_max() const {
  return queue_duration_max_;
}

int32 DeathData::queue_duration_sample() const {
  return queue_duration_sample_;
}

void DeathData::ResetMax() {
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}

void DeathData::Clear() {
  count_ = 0;
  run_duration_sum_ = 0;
  run_duration_max_ = 0;
  run_duration_sample_ = 0;
  queue_duration_sum_ = 0;
  queue_duration_max_ = 0;
  queue_duration_sample_ = 0;
}

//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1) {
}

DeathDataSnapshot::DeathDataSnapshot(
    const tracked_objects::DeathData& death_data)
    : count(death_data.count()),
      run_duration_sum(death_data.run_duration_sum()),
      run_duration_max(death_data.run_duration_max()),
      run_duration_sample(death_data.run_duration_sample()),
      queue_duration_sum(death_data.queue_duration_sum()),
      queue_duration_max(death_data.queue_duration_max()),
      queue_duration_sample(death_data.queue_duration_sample()) {
}

DeathDataSnapshot::~DeathDataSnapshot() {
}

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

BirthOnThreadSnapshot::BirthOnThreadSnapshot(
    const tracked_objects::BirthOnThread& birth)
    : location(birth.location()),
      thread_name(birth.birth_thread()->thread_name()) {
}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

void Births::ForgetBirth() { --birth_count_; }

void Births::Clear() { birth_count_ = 0; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during
// accesses to them.

// static
NowFunction* ThreadData::now_function_ = NULL;

// A TLS slot which points to the ThreadData instance for the current thread.
// We do a fake initialization here (zeroing out data), and then the real
// in-place construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& suggested_name)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(0),
      incarnation_count_for_pool_(-1) {
  DCHECK_GE(suggested_name.size(), 0u);
  thread_name_ = suggested_name;
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1) {
  CHECK_GT(thread_number, 0);
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }

// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
  if (!Initialize())  // Always initialize if needed.
    return;
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data = new ThreadData(suggested_name);
  tls_index_.Set(current_thread_data);
}
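// Hypothetical call site (thread name invented for illustration):
//   tracked_objects::ThreadData::InitializeThreadContext("MyIOThread");
// Later calls on the same thread are no-ops, since the TLS slot is already
// populated for that thread.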
// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback.  There is a chance
  // that the allocator is no longer active on this thread.
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't
  // be allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;
  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}

// static
void ThreadData::Snapshot(bool reset_max, ProcessDataSnapshot* process_data) {
  // Add births that have run to completion to |process_data|.
  // |birth_counts| tracks the total number of births recorded at each location
  // for which we have not seen a death count.
  BirthCountMap birth_counts;
  ThreadData::SnapshotAllExecutedTasks(reset_max, process_data, &birth_counts);

  // Add births that are still active -- i.e. objects that have tallied a
  // birth, but have not yet tallied a matching death, and hence must be
  // either running, queued up, or being held in limbo for future posting.
  for (BirthCountMap::const_iterator it = birth_counts.begin();
       it != birth_counts.end(); ++it) {
    if (it->second > 0) {
      process_data->tasks.push_back(
          TaskSnapshot(*it->first, DeathData(it->second), "Still_Alive"));
    }
  }
}
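// Net effect of the two passes: SnapshotAllExecutedTasks() adds every birth
// tally and subtracts every death tally per location, so a positive remainder
// in |birth_counts| counts tasks that were posted but have not yet died;
// those are reported above with a synthetic DeathData and the pseudo thread
// name "Still_Alive".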
Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
      !parent_stack_.empty()) {
    const Births* parent = parent_stack_.top();
    ParentChildPair pair(parent, child);
    if (parent_child_set_.find(pair) == parent_child_set_.end()) {
      // Lock since the map may get relocated now, and other threads sometimes
      // snapshot it (but they lock before copying it).
      base::AutoLock lock(map_lock_);
      parent_child_set_.insert(pair);
    }
  }

  return child;
}

void ThreadData::TallyADeath(const Births& birth,
                             int32 queue_duration,
                             int32 run_duration) {
  // Stir in some randomness, plus add constant in case durations are zero.
  const int32 kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));

  // We don't have queue durations without OS timer.  OS timer is automatically
  // used for task-post-timing, so the use of an alternate timer implies all
  // queue times are invalid.
  if (kAllowAlternateTimeSourceHandling && now_function_)
    queue_duration = 0;

  DeathMap::iterator it = death_map_.find(&birth);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&birth];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);

  if (!kTrackParentChildLinks)
    return;
  if (!parent_stack_.empty()) {  // We might get turned off.
    DCHECK_EQ(parent_stack_.top(), &birth);
    parent_stack_.pop();
  }
}

// static
Births* ThreadData::TallyABirthIfActive(const Location& location) {
  if (!kTrackAllTaskObjects)
    return NULL;  // Not compiled in.

  if (!TrackingStatus())
    return NULL;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return NULL;
  return current_thread_data->TallyABirth(location);
}
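// Typical pairing of the tally calls (hypothetical call sites, shown only to
// illustrate the flow):
//   // At post time:
//   Births* birth = ThreadData::TallyABirthIfActive(FROM_HERE);
//   // After the task runs on a named thread (the Births* travels inside
//   // base::TrackingInfo::birth_tally between the two calls):
//   ThreadData::TallyRunOnNamedThreadIfTracking(completed_task,
//                                               start_of_run, end_of_run);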
// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* birth = completed_task.birth_tally;
  if (!birth)
    return;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero.  In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value.  For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
        .InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* birth,
    const TrackedTime& time_posted,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity
  // under one ThreadData instance that uses locks to protect *all* access.
  // This will reduce memory (making it provably bounded), but run
  // incrementally slower (since we'll use locks on TallyABirth and
  // TallyADeath).  The good news is that the locks on TallyADeath will be
  // *after* the worker thread has run, and hence nothing will be waiting for
  // the completion (... besides some other thread that might like to run).
  // Also, the worker threads' tasks are generally longer, and hence the cost
  // of the lock may perchance be amortized over the long task's lifetime.
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}
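// Duration arithmetic used by both variants above, in milliseconds:
//   queue_duration = start_of_run - time_posted   (waiting to be scheduled)
//   run_duration   = end_of_run   - start_of_run  (actually executing)
// Each silently defaults to 0 whenever its timestamps were not collected
// because tracking was off (or was racing from off to on).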
// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* birth,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null() && !end_of_run.is_null())
    run_duration = (end_of_run - start_of_run).InMilliseconds();
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}
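// A scoped region has no post/queue phase, which is why queue_duration stays
// 0 above.  A hypothetical RAII wrapper (illustrative only; not part of this
// file) could drive this API as follows:
//   class ScopedRegionTally {
//    public:
//     explicit ScopedRegionTally(const Births* birth)
//         : birth_(birth), start_(ThreadData::NowForStartOfRun(birth)) {}
//     ~ScopedRegionTally() {
//       ThreadData::TallyRunInAScopedRegionIfTracking(
//           birth_, start_, ThreadData::NowForEndOfRun());
//     }
//    private:
//     const Births* birth_;
//     TrackedTime start_;
//   };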
// static
void ThreadData::SnapshotAllExecutedTasks(bool reset_max,
                                          ProcessDataSnapshot* process_data,
                                          BirthCountMap* birth_counts) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the
  // advantage of working even with threads that don't have message loops.  If
  // a user sees any strangeness, they can always just run their stats
  // gathering a second time.
  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(reset_max, process_data, birth_counts);
  }
}

void ThreadData::SnapshotExecutedTasks(bool reset_max,
                                       ProcessDataSnapshot* process_data,
                                       BirthCountMap* birth_counts) {
  // Get copy of data, so that the data will not change during the iterations
  // and processing.
  ThreadData::BirthMap birth_map;
  ThreadData::DeathMap death_map;
  ThreadData::ParentChildSet parent_child_set;
  SnapshotMaps(reset_max, &birth_map, &death_map, &parent_child_set);

  for (ThreadData::DeathMap::const_iterator it = death_map.begin();
       it != death_map.end(); ++it) {
    process_data->tasks.push_back(
        TaskSnapshot(*it->first, it->second, thread_name()));
    (*birth_counts)[it->first] -= it->second.count();
  }

  for (ThreadData::BirthMap::const_iterator it = birth_map.begin();
       it != birth_map.end(); ++it) {
    (*birth_counts)[it->second] += it->second->birth_count();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ThreadData::ParentChildSet::const_iterator it =
           parent_child_set.begin();
       it != parent_child_set.end(); ++it) {
    process_data->descendants.push_back(ParentChildPairSnapshot(*it));
  }
}

// This may be called from another thread.
void ThreadData::SnapshotMaps(bool reset_max,
                              BirthMap* birth_map,
                              DeathMap* death_map,
                              ParentChildSet* parent_child_set) {
  base::AutoLock lock(map_lock_);
  for (BirthMap::const_iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    (*birth_map)[it->first] = it->second;
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it) {
    (*death_map)[it->first] = it->second;
    if (reset_max)
      it->second.ResetMax();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ParentChildSet::iterator it = parent_child_set_.begin();
       it != parent_child_set_.end(); ++it)
    parent_child_set->insert(*it);
}

// static
void ThreadData::ResetAllThreadData() {
  ThreadData* my_list = first();

  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next())
    thread_data->Reset();
}

void ThreadData::Reset() {
  base::AutoLock lock(map_lock_);
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it)
    it->second.Clear();
  for (BirthMap::iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    it->second->Clear();
}
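// Note on the Reset() path above: the maps keep their keys (the intentionally
// leaked Births objects and the DeathData slots remain allocated); only the
// tallies are zeroed, so subsequent snapshots accumulate from a clean slate
// without any deallocation.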
static void OptionallyInitializeAlternateTimer() {
  NowFunction* alternate_time_source = GetAlternateTimeSource();
  if (alternate_time_source)
    ThreadData::SetAlternateTimeSource(alternate_time_source);
}

bool ThreadData::Initialize() {
  if (!kTrackAllTaskObjects)
    return false;  // Not compiled in.
  if (status_ >= DEACTIVATED)
    return true;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize tls.  We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (status_ >= DEACTIVATED)
    return true;  // Someone raced in here and beat us.

  // Put an alternate timer in place if the environment calls for it, such as
  // for tracking TCMalloc allocations.  This insertion is idempotent, so we
  // don't mind if there is a race, and we'd prefer not to be in a lock while
  // doing this work.
  if (kAllowAlternateTimeSourceHandling)
    OptionallyInitializeAlternateTimer();

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(status_, UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    if (!tls_index_.initialized())
      return false;
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(status_, DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt.  It
  // also ensures that if we have a racy initialization, we'll bail as soon as
  // we get the lock earlier in this method.
  status_ = kInitialStartupState;
  if (!kTrackParentChildLinks &&
      kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
    status_ = PROFILING_ACTIVE;
  DCHECK(status_ != UNINITIALIZED);
  return true;
}

// static
bool ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);

  if (!Initialize())  // No-op if already initialized.
    return false;  // Not compiled in.

  if (!kTrackParentChildLinks && status > DEACTIVATED)
    status = PROFILING_ACTIVE;
  status_ = status;
  return true;
}

// static
ThreadData::Status ThreadData::status() {
  return status_;
}

// static
bool ThreadData::TrackingStatus() {
  return status_ > DEACTIVATED;
}

// static
bool ThreadData::TrackingParentChildStatus() {
  return status_ >= PROFILING_CHILDREN_ACTIVE;
}

// static
TrackedTime ThreadData::NowForStartOfRun(const Births* parent) {
  if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) {
    ThreadData* current_thread_data = Get();
    if (current_thread_data)
      current_thread_data->parent_stack_.push(parent);
  }
  return Now();
}

// static
TrackedTime ThreadData::NowForEndOfRun() {
  return Now();
}

// static
void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
  DCHECK(now_function);
  if (kAllowAlternateTimeSourceHandling)
    now_function_ = now_function;
}

// static
TrackedTime ThreadData::Now() {
  if (kAllowAlternateTimeSourceHandling && now_function_)
    return TrackedTime::FromMilliseconds((*now_function_)());
  if (kTrackAllTaskObjects && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}
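// Hypothetical installation of an alternate clock (function names invented
// for illustration; Now() above feeds whatever the source returns through
// TrackedTime::FromMilliseconds()):
//   unsigned int MyThreadCpuNowMs() {
//     return CurrentThreadCpuTimeInMs();  // e.g. a per-thread CPU clock.
//   }
//   ...
//   tracked_objects::ThreadData::SetAlternateTimeSource(&MyThreadCpuNowMs);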
// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.
  // Verify that we've at least shut down/cleaned up the major named threads.
  // The caller should tell us how many thread shutdowns should have taken
  // place by now.
  return;  // TODO(jar): until this is working on XP, don't run the real test.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
}

// static
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to cleanup so that
  // additional tests can be run.
  // We must be single threaded...  but be careful anyway.
  if (!InitializeAndSetTrackingStatus(DEACTIVATED))
    return;
  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global statics back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  status_ = DORMANT_DURING_TESTS;  // Almost UNINITIALIZED.

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak) {
    ThreadData* thread_data = thread_data_list;
    while (thread_data) {
      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
      thread_data = thread_data->next();
    }
    return;
  }

  // When we want to cleanup (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}

//------------------------------------------------------------------------------
TaskSnapshot::TaskSnapshot() {
}

TaskSnapshot::TaskSnapshot(const BirthOnThread& birth,
                           const DeathData& death_data,
                           const std::string& death_thread_name)
    : birth(birth),
      death_data(death_data),
      death_thread_name(death_thread_name) {
}

TaskSnapshot::~TaskSnapshot() {
}

//------------------------------------------------------------------------------
// ParentChildPairSnapshot

ParentChildPairSnapshot::ParentChildPairSnapshot() {
}

ParentChildPairSnapshot::ParentChildPairSnapshot(
    const ThreadData::ParentChildPair& parent_child)
    : parent(*parent_child.first),
      child(*parent_child.second) {
}

ParentChildPairSnapshot::~ParentChildPairSnapshot() {
}

//------------------------------------------------------------------------------
// ProcessDataSnapshot

ProcessDataSnapshot::ProcessDataSnapshot()
#if !defined(OS_NACL)
    : process_id(base::GetCurrentProcId()) {
#else
    : process_id(0) {
#endif
}

ProcessDataSnapshot::~ProcessDataSnapshot() {
}

}  // namespace tracked_objects