// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <limits.h>
#include <stdlib.h>

#include "base/atomicops.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/process/process_handle.h"
#include "base/profiler/alternate_timer.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/tracking_info.h"

using base::TimeDelta;

namespace base {
class TimeDelta;
}

namespace tracked_objects {

namespace {
// Flag to compile out almost all of the task tracking code.
const bool kTrackAllTaskObjects = true;

// TODO(jar): Evaluate the perf impact of enabling this.  If the perf impact is
// negligible, enable by default.
// Flag to compile out parent-child link recording.
const bool kTrackParentChildLinks = false;

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed. If there is no flag seen, then
// this state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState =
    ThreadData::PROFILING_CHILDREN_ACTIVE;

// Control whether an alternate time source (Now() function) is supported by
// the ThreadData class.  This compile time flag should be set to true if we
// want other modules (such as a memory allocator, or a thread-specific CPU time
// clock) to be able to provide a thread-specific Now() function.  Without this
// compile-time flag, the code will only support the wall-clock time.  This flag
// can be flipped to efficiently disable this path (if there is a performance
// problem with its presence).
static const bool kAllowAlternateTimeSourceHandling = true;

inline bool IsProfilerTimingEnabled() {
  enum {
    UNDEFINED_TIMING,
    ENABLED_TIMING,
    DISABLED_TIMING,
  };
  static base::subtle::Atomic32 timing_enabled = UNDEFINED_TIMING;
  // Reading |timing_enabled| is done without barrier because multiple
  // initialization is not an issue while the barrier can be relatively costly
  // given that this method is sometimes called in a tight loop.
  base::subtle::Atomic32 current_timing_enabled =
      base::subtle::NoBarrier_Load(&timing_enabled);
  if (current_timing_enabled == UNDEFINED_TIMING) {
    if (!CommandLine::InitializedForCurrentProcess())
      return true;
    current_timing_enabled =
        (CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
             switches::kProfilerTiming) ==
         switches::kProfilerTimingDisabledValue)
            ? DISABLED_TIMING
            : ENABLED_TIMING;
    base::subtle::NoBarrier_Store(&timing_enabled, current_timing_enabled);
  }
  return current_timing_enabled == ENABLED_TIMING;
}
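// Usage note for IsProfilerTimingEnabled() above: timing defaults to enabled,
// and is turned off only when the process was launched with the
// switches::kProfilerTiming switch set to
// switches::kProfilerTimingDisabledValue (see base/base_switches.h for the
// literal strings).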

}  // namespace

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData() {
  Clear();
}

DeathData::DeathData(int count) {
  Clear();
  count_ = count;
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//             if (assign_it)
//               target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
    ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it))
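// Worked example of the bit trick: -static_cast<int32>(assign_it) is all ones
// (~0) when assign_it is true, and all zeros when it is false, so:
//   true:   target ^= (target ^ source) & ~0;  // i.e., target = source
//   false:  target ^= (target ^ source) & 0;   // i.e., target is unchanged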

void DeathData::RecordDeath(const int32 queue_duration,
                            const int32 run_duration,
                            int32 random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    ++count_;
  queue_duration_sum_ += queue_duration;
  run_duration_sum_ += run_duration;

  if (queue_duration_max_ < queue_duration)
    queue_duration_max_ = queue_duration;
  if (run_duration_max_ < run_duration)
    run_duration_max_ = run_duration;

  // Take a uniformly distributed sample over all durations ever supplied.
  // The probability that we (instead) use this new sample is 1/count_.  This
  // results in a completely uniform selection of the sample (at least when we
  // don't clamp count_... but that should be inconsequentially likely).
  // We ignore the fact that we correlated our selection of a sample to the run
  // and queue times (i.e., we used them to generate random_number).
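  // (This is reservoir sampling with a reservoir of size one.  Sketch of why
  // it is uniform, assuming random_number acts like a uniform random draw:
  // the Nth sample is kept with probability 1/N, and a sample kept at step k
  // survives every later step, so its chance of being the stored sample after
  // N deaths is (1/k) * (k/(k+1)) * ... * ((N-1)/N) == 1/N for every k <= N.)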
  CHECK_GT(count_, 0);
  if (0 == (random_number % count_)) {
    queue_duration_sample_ = queue_duration;
    run_duration_sample_ = run_duration;
  }
}

int DeathData::count() const { return count_; }

int32 DeathData::run_duration_sum() const { return run_duration_sum_; }

int32 DeathData::run_duration_max() const { return run_duration_max_; }

int32 DeathData::run_duration_sample() const {
  return run_duration_sample_;
}

int32 DeathData::queue_duration_sum() const {
  return queue_duration_sum_;
}

int32 DeathData::queue_duration_max() const {
  return queue_duration_max_;
}

int32 DeathData::queue_duration_sample() const {
  return queue_duration_sample_;
}

void DeathData::ResetMax() {
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}

void DeathData::Clear() {
  count_ = 0;
  run_duration_sum_ = 0;
  run_duration_max_ = 0;
  run_duration_sample_ = 0;
  queue_duration_sum_ = 0;
  queue_duration_max_ = 0;
  queue_duration_sample_ = 0;
}

//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1) {
}

DeathDataSnapshot::DeathDataSnapshot(
    const tracked_objects::DeathData& death_data)
    : count(death_data.count()),
      run_duration_sum(death_data.run_duration_sum()),
      run_duration_max(death_data.run_duration_max()),
      run_duration_sample(death_data.run_duration_sample()),
      queue_duration_sum(death_data.queue_duration_sum()),
      queue_duration_max(death_data.queue_duration_max()),
      queue_duration_sample(death_data.queue_duration_sample()) {
}

DeathDataSnapshot::~DeathDataSnapshot() {
}

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

BirthOnThreadSnapshot::BirthOnThreadSnapshot(
    const tracked_objects::BirthOnThread& birth)
    : location(birth.location()),
      thread_name(birth.birth_thread()->thread_name()) {
}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) { }

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

void Births::ForgetBirth() { --birth_count_; }

void Births::Clear() { birth_count_ = 0; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// static
NowFunction* ThreadData::now_function_ = NULL;

// A TLS slot which points to the ThreadData instance for the current thread. We
// do a fake initialization here (zeroing out data), and then the real in-place
// construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& suggested_name)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(0),
      incarnation_count_for_pool_(-1) {
  DCHECK_GE(suggested_name.size(), 0u);
  thread_name_ = suggested_name;
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1) {
  CHECK_GT(thread_number, 0);
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
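  // Note on the pointer arithmetic above: subtracting a null ThreadData*
  // yields this object's address measured in units of sizeof(ThreadData),
  // truncated to int32.  It is a cheap (if formally implementation-defined)
  // way to fold the allocation address into the seed.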

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }

// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
  if (!Initialize())  // Always initialize if needed.
    return;
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data = new ThreadData(suggested_name);
  tls_index_.Set(current_thread_data);
}

// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.
  ++cleanup_count_;
  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}

// static
void ThreadData::Snapshot(bool reset_max, ProcessDataSnapshot* process_data) {
  // Add births that have run to completion to |process_data|.
  // |birth_counts| tracks the total number of births recorded at each location
  // for which we have not seen a death count.
  BirthCountMap birth_counts;
  ThreadData::SnapshotAllExecutedTasks(reset_max, process_data, &birth_counts);

  // Add births that are still active -- i.e. objects that have tallied a birth,
  // but have not yet tallied a matching death, and hence must be either
  // running, queued up, or being held in limbo for future posting.
  for (BirthCountMap::const_iterator it = birth_counts.begin();
       it != birth_counts.end(); ++it) {
    if (it->second > 0) {
      process_data->tasks.push_back(
          TaskSnapshot(*it->first, DeathData(it->second), "Still_Alive"));
    }
  }
}
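// Worked example for Snapshot() above: if a location has tallied 5 births and
// 3 matching deaths, the per-thread snapshots contribute +5 and -3 to its
// birth_counts entry, so the loop above emits a synthetic "Still_Alive"
// TaskSnapshot with a count of 2 for the tasks not yet completed.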

Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
      !parent_stack_.empty()) {
    const Births* parent = parent_stack_.top();
    ParentChildPair pair(parent, child);
    if (parent_child_set_.find(pair) == parent_child_set_.end()) {
      // Lock since the map may get relocated now, and other threads sometimes
      // snapshot it (but they lock before copying it).
      base::AutoLock lock(map_lock_);
      parent_child_set_.insert(pair);
    }
  }

  return child;
}

void ThreadData::TallyADeath(const Births& birth,
                             int32 queue_duration,
                             int32 run_duration) {
  // Stir in some randomness, plus add constant in case durations are zero.
  const int32 kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));
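  // (kSomePrimeNumber is 2^31 - 1, a Mersenne prime.  Adding it guarantees the
  // accumulator changes on every death, even when both durations are zero, and
  // the XOR with the Births address decorrelates identical duration streams
  // tallied at different locations.)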

  // We don't have queue durations without OS timer. OS timer is automatically
  // used for task-post-timing, so the use of an alternate timer implies all
  // queue times are invalid.
  if (kAllowAlternateTimeSourceHandling && now_function_)
    queue_duration = 0;

  DeathMap::iterator it = death_map_.find(&birth);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&birth];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);

  if (!kTrackParentChildLinks)
    return;
  if (!parent_stack_.empty()) {  // We might get turned off.
    DCHECK_EQ(parent_stack_.top(), &birth);
    parent_stack_.pop();
  }
}

// static
Births* ThreadData::TallyABirthIfActive(const Location& location) {
  if (!kTrackAllTaskObjects)
    return NULL;  // Not compiled in.

  if (!TrackingStatus())
    return NULL;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return NULL;
  return current_thread_data->TallyABirth(location);
}

// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* birth = completed_task.birth_tally;
  if (!birth)
    return;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero.  In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
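  // For example, if tracking was toggled off between posting and running,
  // start_of_run may be a null TrackedTime; reporting zero durations then
  // keeps the tallies consistent instead of recording a bogus interval.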
  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
        .InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* birth,
    const TrackedTime& time_posted,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access.  This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath).  The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run).  Also, the worker threads' tasks are
  // generally longer, and hence the cost of the lock may perchance be amortized
  // over the long task's lifetime.
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* birth,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null() && !end_of_run.is_null())
    run_duration = (end_of_run - start_of_run).InMilliseconds();
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::SnapshotAllExecutedTasks(bool reset_max,
                                          ProcessDataSnapshot* process_data,
                                          BirthCountMap* birth_counts) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops.  If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time.
  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(reset_max, process_data, birth_counts);
  }
}

void ThreadData::SnapshotExecutedTasks(bool reset_max,
                                       ProcessDataSnapshot* process_data,
                                       BirthCountMap* birth_counts) {
  // Get copy of data, so that the data will not change during the iterations
  // and processing.
  ThreadData::BirthMap birth_map;
  ThreadData::DeathMap death_map;
  ThreadData::ParentChildSet parent_child_set;
  SnapshotMaps(reset_max, &birth_map, &death_map, &parent_child_set);

  for (ThreadData::DeathMap::const_iterator it = death_map.begin();
       it != death_map.end(); ++it) {
    process_data->tasks.push_back(
        TaskSnapshot(*it->first, it->second, thread_name()));
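    // Subtract the death count, so that |birth_counts| ends up holding only
    // births that have not yet been matched by a death.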
    (*birth_counts)[it->first] -= it->second.count();
  }

  for (ThreadData::BirthMap::const_iterator it = birth_map.begin();
       it != birth_map.end(); ++it) {
    (*birth_counts)[it->second] += it->second->birth_count();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin();
       it != parent_child_set.end(); ++it) {
    process_data->descendants.push_back(ParentChildPairSnapshot(*it));
  }
}

// This may be called from another thread.
void ThreadData::SnapshotMaps(bool reset_max,
                              BirthMap* birth_map,
                              DeathMap* death_map,
                              ParentChildSet* parent_child_set) {
  base::AutoLock lock(map_lock_);
  for (BirthMap::const_iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    (*birth_map)[it->first] = it->second;
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it) {
    (*death_map)[it->first] = it->second;
    if (reset_max)
      it->second.ResetMax();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ParentChildSet::iterator it = parent_child_set_.begin();
       it != parent_child_set_.end(); ++it)
    parent_child_set->insert(*it);
}

// static
void ThreadData::ResetAllThreadData() {
  ThreadData* my_list = first();

  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next())
    thread_data->Reset();
}

void ThreadData::Reset() {
  base::AutoLock lock(map_lock_);
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it)
    it->second.Clear();
  for (BirthMap::iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    it->second->Clear();
}

static void OptionallyInitializeAlternateTimer() {
  NowFunction* alternate_time_source = GetAlternateTimeSource();
  if (alternate_time_source)
    ThreadData::SetAlternateTimeSource(alternate_time_source);
}
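// Illustrative sketch of installing an alternate time source by hand (the
// NowFunction signature comes from base/profiler/alternate_timer.h; the
// MyThreadCpuNowMs function below is hypothetical):
//
//   unsigned int MyThreadCpuNowMs() {
//     return static_cast<unsigned int>(GetThreadCpuMs());  // hypothetical
//   }
//   tracked_objects::ThreadData::SetAlternateTimeSource(&MyThreadCpuNowMs);
//
// In practice the hookup happens through GetAlternateTimeSource() above (e.g.
// a TCMalloc-aware timer), not through a direct call like this.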

bool ThreadData::Initialize() {
  if (!kTrackAllTaskObjects)
    return false;  // Not compiled in.
  if (status_ >= DEACTIVATED)
    return true;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize tls.  We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (status_ >= DEACTIVATED)
    return true;  // Someone raced in here and beat us.

  // Put an alternate timer in place if the environment calls for it, such as
  // for tracking TCMalloc allocations.  This insertion is idempotent, so we
  // don't mind if there is a race, and we'd prefer not to be in a lock while
  // doing this work.
  if (kAllowAlternateTimeSourceHandling)
    OptionallyInitializeAlternateTimer();

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(status_, UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    if (!tls_index_.initialized())
      return false;
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(status_, DORMANT_DURING_TESTS);
  }

  // Incarnation counter is only significant to testing, as it otherwise will
  // never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, we'll bail as soon as we
  // get the lock earlier in this method.
  status_ = kInitialStartupState;
  if (!kTrackParentChildLinks &&
      kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
    status_ = PROFILING_ACTIVE;
  DCHECK(status_ != UNINITIALIZED);
  return true;
}

// static
bool ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);

  if (!Initialize())  // No-op if already initialized.
    return false;  // Not compiled in.

  if (!kTrackParentChildLinks && status > DEACTIVATED)
    status = PROFILING_ACTIVE;
  status_ = status;
  return true;
}

// static
ThreadData::Status ThreadData::status() {
  return status_;
}

// static
bool ThreadData::TrackingStatus() {
  return status_ > DEACTIVATED;
}

// static
bool ThreadData::TrackingParentChildStatus() {
  return status_ >= PROFILING_CHILDREN_ACTIVE;
}
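// The comparisons above rely on the Status enum ordering assumed throughout
// this file (see tracked_objects.h): UNINITIALIZED < DORMANT_DURING_TESTS <
// DEACTIVATED < PROFILING_ACTIVE < PROFILING_CHILDREN_ACTIVE.  Thus
// "status_ > DEACTIVATED" means some form of profiling is on, and being at or
// above PROFILING_CHILDREN_ACTIVE additionally enables parent-child tracking.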

// static
TrackedTime ThreadData::NowForStartOfRun(const Births* parent) {
  if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) {
    ThreadData* current_thread_data = Get();
    if (current_thread_data)
      current_thread_data->parent_stack_.push(parent);
  }
  return Now();
}

// static
TrackedTime ThreadData::NowForEndOfRun() {
  return Now();
}

// static
void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
  DCHECK(now_function);
  if (kAllowAlternateTimeSourceHandling)
    now_function_ = now_function;
}

// static
TrackedTime ThreadData::Now() {
  if (kAllowAlternateTimeSourceHandling && now_function_)
    return TrackedTime::FromMilliseconds((*now_function_)());
  if (kTrackAllTaskObjects && IsProfilerTimingEnabled() && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}

// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.

  // TODO(jar): until this is working on XP, don't run the real test.
#if 0
  // Verify that we've at least shut down/cleaned up the major named threads.  The
  // caller should tell us how many thread shutdowns should have taken place by
  // now.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
#endif
}

// static
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to clean up so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  if (!InitializeAndSetTrackingStatus(DEACTIVATED))
    return;
  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global statics back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  status_ = DORMANT_DURING_TESTS;  // Almost UNINITIALIZED.

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak) {
    ThreadData* thread_data = thread_data_list;
    while (thread_data) {
      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
      thread_data = thread_data->next();
    }
    return;
  }

  // When we want to clean up (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}

//------------------------------------------------------------------------------
TaskSnapshot::TaskSnapshot() {
}

TaskSnapshot::TaskSnapshot(const BirthOnThread& birth,
                           const DeathData& death_data,
                           const std::string& death_thread_name)
    : birth(birth),
      death_data(death_data),
      death_thread_name(death_thread_name) {
}

TaskSnapshot::~TaskSnapshot() {
}

//------------------------------------------------------------------------------
// ParentChildPairSnapshot

ParentChildPairSnapshot::ParentChildPairSnapshot() {
}

ParentChildPairSnapshot::ParentChildPairSnapshot(
    const ThreadData::ParentChildPair& parent_child)
    : parent(*parent_child.first),
      child(*parent_child.second) {
}

ParentChildPairSnapshot::~ParentChildPairSnapshot() {
}

//------------------------------------------------------------------------------
// ProcessDataSnapshot

ProcessDataSnapshot::ProcessDataSnapshot()
#if !defined(OS_NACL)
    : process_id(base::GetCurrentProcId()) {
#else
    : process_id(0) {
#endif
}

ProcessDataSnapshot::~ProcessDataSnapshot() {
}

}  // namespace tracked_objects