      1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef BASE_TRACKED_OBJECTS_H_
      6 #define BASE_TRACKED_OBJECTS_H_
      7 
      8 #include <stdint.h>
      9 
     10 #include <map>
     11 #include <set>
     12 #include <stack>
     13 #include <string>
     14 #include <utility>
     15 #include <vector>
     16 
     17 #include "base/atomicops.h"
     18 #include "base/base_export.h"
     19 #include "base/containers/hash_tables.h"
     20 #include "base/gtest_prod_util.h"
     21 #include "base/lazy_instance.h"
     22 #include "base/location.h"
     23 #include "base/macros.h"
     24 #include "base/process/process_handle.h"
     25 #include "base/profiler/alternate_timer.h"
     26 #include "base/profiler/tracked_time.h"
     27 #include "base/synchronization/lock.h"
     28 #include "base/threading/thread_checker.h"
     29 #include "base/threading/thread_local_storage.h"
     30 
     31 namespace base {
     32 struct TrackingInfo;
     33 }
     34 
     35 // TrackedObjects provides a database of stats about objects (generally Tasks)
     36 // that are tracked.  Tracking means their birth, death, duration, birth thread,
     37 // death thread, and birth place are recorded.  This data is carefully spread
     38 // across a series of objects so that the counts and times can be rapidly
     39 // updated without (usually) having to lock the data, and hence there is usually
     40 // very little contention caused by the tracking.  The data can be viewed via
     41 // the about:profiler URL, with a variety of sorting and filtering choices.
     42 //
     43 // These classes serve as the basis of a profiler of sorts for the Tasks system.
     44 // As a result, design decisions were made to maximize speed, by minimizing
     45 // recurring allocation/deallocation, lock contention and data copying.  In the
     46 // "stable" state, which is reached relatively quickly, there is no separate
     47 // marginal allocation cost associated with construction or destruction of
     48 // tracked objects, no locks are generally employed, and probably the largest
     49 // computational cost is associated with obtaining start and stop times for
     50 // instances as they are created and destroyed.
     51 //
     52 // The following describes the life cycle of tracking an instance.
     53 //
     54 // First off, when the instance is created, the FROM_HERE macro is expanded
     55 // to specify the birth place (file, line, function) where the instance was
     56 // created.  That data is used to create a transient Location instance
     57 // encapsulating the above triple of information.  The strings (like __FILE__)
     58 // are passed around by reference, with the assumption that they are static, and
     59 // will never go away.  This ensures that the strings can be dealt with as atoms
     60 // with great efficiency (i.e., copying of strings is never needed, and
     61 // comparisons for equality can be based on pointer comparisons).
     62 //
     63 // Next, a Births instance is created for use ONLY on the thread where this
     64 // instance was created.  That Births instance records (in a base class
     65 // BirthOnThread) references to the static data provided in a Location instance,
     66 // as well as a pointer specifying the thread on which the birth takes place.
     67 // Hence there is at most one Births instance for each Location on each thread.
     68 // The derived Births class contains slots for recording statistics about all
     69 // instances born at the same location.  Statistics currently include only the
     70 // count of instances constructed.
     71 //
     72 // Since the base class BirthOnThread contains only constant data, it can be
     73 // freely accessed by any thread at any time (i.e., only the statistic needs to
     74 // be handled carefully, and stats are updated exclusively on the birth thread).
     75 //
     76 // For Tasks, having now either constructed or found the Births instance
     77 // described above, a pointer to the Births instance is then recorded into the
     78 // PendingTask structure in MessageLoop.  This fact alone is very useful in
     79 // debugging, when there is a question of where an instance came from.  In
     80 // addition, the birth time is also recorded and used to later evaluate the
     81 // lifetime duration of the whole Task.  As a result of the above embedding, we
     82 // can find out a Task's location of birth, and thread of birth, without using
     83 // any locks, as all that data is constant across the life of the process.
     84 //
     85 // The above work *could* also be done for any other object as well by calling
     86 // TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
     87 //
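// For non-Task objects, a minimal usage sketch of that manual instrumentation
// might look like the following (DoTrackedWork() is a hypothetical function;
// the worker-thread flavor of the tally call is shown):
//
//   tracked_objects::Births* births =
//       tracked_objects::ThreadData::TallyABirthIfActive(FROM_HERE);
//   tracked_objects::TrackedTime time_posted =
//       tracked_objects::ThreadData::Now();
//   ...  // The tracked work gets queued, scheduled, etc.
//   tracked_objects::TaskStopwatch stopwatch;
//   stopwatch.Start();
//   DoTrackedWork();
//   stopwatch.Stop();
//   tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
//       births, time_posted, stopwatch);
//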
     88 // The amount of memory used in the above data structures depends on how many
     89 // threads there are, and how many Locations of construction there are.
     90 // Fortunately, we don't use memory that is the product of those two counts, but
     91 // rather we only need one Births instance for each thread that constructs an
     92 // instance at a Location.  In many cases, instances are only created on one
     93 // thread, so the memory utilization is actually fairly restrained.
     94 //
     95 // Lastly, when an instance is deleted, the final tallies of statistics are
     96 // carefully accumulated.  That tallying writes into slots (members) in a
     97 // collection of DeathData instances.  For each birth place Location that is
     98 // destroyed on a thread, there is a DeathData instance to record the additional
     99 // death count, as well as accumulate the run-time and queue-time durations for
    100 // the instance as it is destroyed (dies).  By maintaining a single place to
    101 // aggregate this running sum *only* for the given thread, we avoid the need to
    102 // lock such DeathData instances. (i.e., these accumulated stats in a DeathData
    103 // instance are exclusively updated by the singular owning thread).
    104 //
    105 // With the above life cycle description complete, the major remaining detail
    106 // is explaining how each thread maintains a list of DeathData instances, and
    107 // of Births instances, and is able to avoid additional (redundant/unnecessary)
    108 // allocations.
    109 //
    110 // Each thread maintains a list of data items specific to that thread in a
    111 // ThreadData instance (for that specific thread only).  The two critical items
    112 // are lists of DeathData and Births instances.  These lists are maintained in
    113 // STL maps, which are indexed by Location.  As noted earlier, we can compare
    114 // locations very efficiently as we consider the underlying data (file,
    115 // function, line) to be atoms, and hence pointer comparison is used rather than
    116 // (slow) string comparisons.
    117 //
    118 // To provide a mechanism for iterating over all "known threads," which means
    119 // threads that have recorded a birth or a death, we create a singly linked list
    120 // of ThreadData instances.  Each such instance maintains a pointer to the next
    121 // one.  A static member of ThreadData provides a pointer to the first item on
    122 // this global list, and access via that all_thread_data_list_head_ item
    123 // requires the use of the list_lock_.
     124 // When a new ThreadData instance is added to the global list, it is prepended,
     125 // which ensures that any prior acquisition of the list remains valid (i.e., the
     126 // holder can iterate over it without fear of it changing, or the necessity of
     127 // using an additional lock).  Iterations are actually pretty rare (used
    128 // primarily for cleanup, or snapshotting data for display), so this lock has
    129 // very little global performance impact.
    130 //
    131 // The above description tries to define the high performance (run time)
    132 // portions of these classes.  After gathering statistics, calls instigated
    133 // by visiting about:profiler will assemble and aggregate data for display.  The
    134 // following data structures are used for producing such displays.  They are
    135 // not performance critical, and their only major constraint is that they should
    136 // be able to run concurrently with ongoing augmentation of the birth and death
    137 // data.
    138 //
     139 // This header also exports a collection of classes that provide "snapshotted"
    140 // representations of the core tracked_objects:: classes.  These snapshotted
    141 // representations are designed for safe transmission of the tracked_objects::
    142 // data across process boundaries.  Each consists of:
    143 // (1) a default constructor, to support the IPC serialization macros,
    144 // (2) a constructor that extracts data from the type being snapshotted, and
    145 // (3) the snapshotted data.
    146 //
    147 // For a given birth location, information about births is spread across data
    148 // structures that are asynchronously changing on various threads.  For
    149 // serialization and display purposes, we need to construct TaskSnapshot
    150 // instances for each combination of birth thread, death thread, and location,
     151 // along with the count of such lifetimes.  We gather such data into
    152 // TaskSnapshot instances, so that such instances can be sorted and
    153 // aggregated (and remain frozen during our processing).
    154 //
     155 // Profiling consists of a sequence of phases.  Each phase in the sequence is
     156 // identified by its 0-based index.
    157 //
    158 // The ProcessDataPhaseSnapshot struct is a serialized representation of the
    159 // list of ThreadData objects for a process for a concrete profiling phase.  It
    160 // holds a set of TaskSnapshots.  The statistics in a snapshot are gathered
     161 // asynchronously relative to their ongoing updates.
     162 // It is possible, though highly unlikely, that stats could be incorrectly
     163 // recorded by this process (all data is held in 32-bit ints, but we are not
     164 // atomically collecting all data, so we could have a count that does not, for
    165 // example, match with the number of durations we accumulated).  The advantage
    166 // to having fast (non-atomic) updates of the data outweighs the minimal risk of
    167 // a singular corrupt statistic snapshot (only the snapshot could be corrupt,
    168 // not the underlying and ongoing statistic).  In contrast, pointer data that
    169 // is accessed during snapshotting is completely invariant, and hence is
    170 // perfectly acquired (i.e., no potential corruption, and no risk of a bad
    171 // memory reference).
    172 //
    173 // TODO(jar): We can implement a Snapshot system that *tries* to grab the
    174 // snapshots on the source threads *when* they have MessageLoops available
    175 // (worker threads don't have message loops generally, and hence gathering from
    176 // them will continue to be asynchronous).  We had an implementation of this in
    177 // the past, but the difficulty is dealing with message loops being terminated.
    178 // We can *try* to spam the available threads via some message loop proxy to
    179 // achieve this feat, and it *might* be valuable when we are collecting data
    180 // for upload via UMA (where correctness of data may be more significant than
    181 // for a single screen of about:profiler).
    182 //
    183 // TODO(jar): We need to store DataCollections, and provide facilities for
    184 // taking the difference between two gathered DataCollections.  For now, we're
    185 // just adding a hack that Reset()s to zero all counts and stats.  This is also
    186 // done in a slightly thread-unsafe fashion, as the resetting is done
    187 // asynchronously relative to ongoing updates (but all data is 32 bit in size).
    188 // For basic profiling, this will work "most of the time," and should be
    189 // sufficient... but storing away DataCollections is the "right way" to do this.
    190 // We'll accomplish this via JavaScript storage of snapshots, and then we'll
    191 // remove the Reset() methods.  We may also need a short-term-max value in
    192 // DeathData that is reset (as synchronously as possible) during each snapshot.
    193 // This will facilitate displaying a max value for each snapshot period.
    194 
    195 namespace tracked_objects {
    196 
    197 //------------------------------------------------------------------------------
    198 // For a specific thread, and a specific birth place, the collection of all
    199 // death info (with tallies for each death thread, to prevent access conflicts).
    200 class ThreadData;
    201 class BASE_EXPORT BirthOnThread {
    202  public:
    203   BirthOnThread(const Location& location, const ThreadData& current);
    204 
    205   const Location& location() const { return location_; }
    206   const ThreadData* birth_thread() const { return birth_thread_; }
    207 
    208  private:
    209   // File/lineno of birth.  This defines the essence of the task, as the context
     210   // of the birth (construction) often tells what the item is for.  This field
    211   // is const, and hence safe to access from any thread.
    212   const Location location_;
    213 
    214   // The thread that records births into this object.  Only this thread is
    215   // allowed to update birth_count_ (which changes over time).
    216   const ThreadData* const birth_thread_;
    217 
    218   DISALLOW_COPY_AND_ASSIGN(BirthOnThread);
    219 };
    220 
    221 //------------------------------------------------------------------------------
    222 // A "snapshotted" representation of the BirthOnThread class.
    223 
    224 struct BASE_EXPORT BirthOnThreadSnapshot {
    225   BirthOnThreadSnapshot();
    226   explicit BirthOnThreadSnapshot(const BirthOnThread& birth);
    227   ~BirthOnThreadSnapshot();
    228 
    229   LocationSnapshot location;
    230   std::string thread_name;
    231 };
    232 
    233 //------------------------------------------------------------------------------
    234 // A class for accumulating counts of births (without bothering with a map<>).
    235 
    236 class BASE_EXPORT Births: public BirthOnThread {
    237  public:
    238   Births(const Location& location, const ThreadData& current);
    239 
    240   int birth_count() const;
    241 
    242   // When we have a birth we update the count for this birthplace.
    243   void RecordBirth();
    244 
    245  private:
    246   // The number of births on this thread for our location_.
    247   int birth_count_;
    248 
    249   DISALLOW_COPY_AND_ASSIGN(Births);
    250 };
    251 
    252 //------------------------------------------------------------------------------
    253 // A "snapshotted" representation of the DeathData class.
    254 
    255 struct BASE_EXPORT DeathDataSnapshot {
    256   DeathDataSnapshot();
    257 
    258   // Constructs the snapshot from individual values.
    259   // The alternative would be taking a DeathData parameter, but this would
     260   // create a loop since DeathData indirectly refers to DeathDataSnapshot.  Passing
    261   // a wrapper structure as a param or using an empty constructor for
    262   // snapshotting DeathData would be less efficient.
    263   DeathDataSnapshot(int count,
    264                     int32_t run_duration_sum,
    265                     int32_t run_duration_max,
    266                     int32_t run_duration_sample,
    267                     int32_t queue_duration_sum,
    268                     int32_t queue_duration_max,
    269                     int32_t queue_duration_sample);
    270   ~DeathDataSnapshot();
    271 
    272   // Calculates and returns the delta between this snapshot and an earlier
    273   // snapshot of the same task |older|.
    274   DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
    275 
    276   int count;
    277   int32_t run_duration_sum;
    278   int32_t run_duration_max;
    279   int32_t run_duration_sample;
    280   int32_t queue_duration_sum;
    281   int32_t queue_duration_max;
    282   int32_t queue_duration_sample;
    283 };
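
// A hedged sketch of how Delta() is meant to be used when reporting a single
// completed phase.  The numbers are illustrative, and the assumption here is
// that the count and duration sums are differenced while the max/sample
// fields describe the current phase:
//
//   DeathDataSnapshot now(10, 500, 120, 30, 800, 200, 40);
//   DeathDataSnapshot older(6, 300, 90, 25, 500, 150, 35);
//   DeathDataSnapshot delta = now.Delta(older);
//   // delta.count == 4, delta.run_duration_sum == 200,
//   // delta.queue_duration_sum == 300.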
    284 
    285 //------------------------------------------------------------------------------
    286 // A "snapshotted" representation of the DeathData for a particular profiling
    287 // phase.  Used as an element of the list of phase snapshots owned by DeathData.
    288 
    289 struct DeathDataPhaseSnapshot {
    290   DeathDataPhaseSnapshot(int profiling_phase,
    291                          int count,
    292                          int32_t run_duration_sum,
    293                          int32_t run_duration_max,
    294                          int32_t run_duration_sample,
    295                          int32_t queue_duration_sum,
    296                          int32_t queue_duration_max,
    297                          int32_t queue_duration_sample,
    298                          const DeathDataPhaseSnapshot* prev);
    299 
     300   // Profiling phase at whose completion this snapshot was taken.
    301   int profiling_phase;
    302 
    303   // Death data snapshot.
    304   DeathDataSnapshot death_data;
    305 
    306   // Pointer to a snapshot from the previous phase.
    307   const DeathDataPhaseSnapshot* prev;
    308 };
    309 
    310 //------------------------------------------------------------------------------
    311 // Information about deaths of a task on a given thread, called "death thread".
    312 // Access to members of this class is never protected by a lock.  The fields
    313 // are accessed in such a way that corruptions resulting from race conditions
    314 // are not significant, and don't accumulate as a result of multiple accesses.
    315 // All invocations of DeathData::OnProfilingPhaseCompleted and
    316 // ThreadData::SnapshotMaps (which takes DeathData snapshot) in a given process
    317 // must be called from the same thread. It doesn't matter what thread it is, but
     318 // it's important that the same thread is used as the snapshot thread during the
     319 // whole process lifetime.  All fields except sample_probability_count_ can be
    320 // snapshotted.
    321 
    322 class BASE_EXPORT DeathData {
    323  public:
    324   DeathData();
    325   DeathData(const DeathData& other);
    326   ~DeathData();
    327 
    328   // Update stats for a task destruction (death) that had a Run() time of
    329   // |duration|, and has had a queueing delay of |queue_duration|.
    330   void RecordDeath(const int32_t queue_duration,
    331                    const int32_t run_duration,
    332                    const uint32_t random_number);
    333 
    334   // Metrics and past snapshots accessors, used only for serialization and in
    335   // tests.
    336   int count() const { return base::subtle::NoBarrier_Load(&count_); }
    337   int32_t run_duration_sum() const {
    338     return base::subtle::NoBarrier_Load(&run_duration_sum_);
    339   }
    340   int32_t run_duration_max() const {
    341     return base::subtle::NoBarrier_Load(&run_duration_max_);
    342   }
    343   int32_t run_duration_sample() const {
    344     return base::subtle::NoBarrier_Load(&run_duration_sample_);
    345   }
    346   int32_t queue_duration_sum() const {
    347     return base::subtle::NoBarrier_Load(&queue_duration_sum_);
    348   }
    349   int32_t queue_duration_max() const {
    350     return base::subtle::NoBarrier_Load(&queue_duration_max_);
    351   }
    352   int32_t queue_duration_sample() const {
    353     return base::subtle::NoBarrier_Load(&queue_duration_sample_);
    354   }
    355   const DeathDataPhaseSnapshot* last_phase_snapshot() const {
    356     return last_phase_snapshot_;
    357   }
    358 
    359   // Called when the current profiling phase, identified by |profiling_phase|,
    360   // ends.
    361   // Must be called only on the snapshot thread.
    362   void OnProfilingPhaseCompleted(int profiling_phase);
    363 
    364  private:
    365   // Members are ordered from most regularly read and updated, to least
    366   // frequently used.  This might help a bit with cache lines.
    367   // Number of runs seen (divisor for calculating averages).
    368   // Can be incremented only on the death thread.
    369   base::subtle::Atomic32 count_;
    370 
    371   // Count used in determining probability of selecting exec/queue times from a
    372   // recorded death as samples.
    373   // Gets incremented only on the death thread, but can be set to 0 by
    374   // OnProfilingPhaseCompleted() on the snapshot thread.
    375   base::subtle::Atomic32 sample_probability_count_;
    376 
    377   // Basic tallies, used to compute averages.  Can be incremented only on the
    378   // death thread.
    379   base::subtle::Atomic32 run_duration_sum_;
    380   base::subtle::Atomic32 queue_duration_sum_;
    381   // Max values, used by local visualization routines.  These are often read,
    382   // but rarely updated.  The max values get assigned only on the death thread,
    383   // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
    384   // snapshot thread.
    385   base::subtle::Atomic32 run_duration_max_;
    386   base::subtle::Atomic32 queue_duration_max_;
    387   // Samples, used by crowd sourcing gatherers.  These are almost never read,
    388   // and rarely updated.  They can be modified only on the death thread.
    389   base::subtle::Atomic32 run_duration_sample_;
    390   base::subtle::Atomic32 queue_duration_sample_;
    391 
    392   // Snapshot of this death data made at the last profiling phase completion, if
    393   // any.  DeathData owns the whole list starting with this pointer.
    394   // Can be accessed only on the snapshot thread.
    395   const DeathDataPhaseSnapshot* last_phase_snapshot_;
    396 
    397   DISALLOW_ASSIGN(DeathData);
    398 };
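
// A hedged sketch of the threading discipline described above.  The thread
// roles are illustrative, and |death_data| is a hypothetical pointer to an
// entry in a ThreadData death map:
//
//   // On the death thread, when a tracked instance dies:
//   death_data->RecordDeath(queue_duration_ms, run_duration_ms, random_number);
//
//   // Later, on the single designated snapshot thread, when a phase ends:
//   death_data->OnProfilingPhaseCompleted(profiling_phase);
//   // last_phase_snapshot() now heads the list of per-phase snapshots.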
    399 
    400 //------------------------------------------------------------------------------
    401 // A temporary collection of data that can be sorted and summarized.  It is
    402 // gathered (carefully) from many threads.  Instances are held in arrays and
    403 // processed, filtered, and rendered.
    404 // The source of this data was collected on many threads, and is asynchronously
    405 // changing.  The data in this instance is not asynchronously changing.
    406 
    407 struct BASE_EXPORT TaskSnapshot {
    408   TaskSnapshot();
    409   TaskSnapshot(const BirthOnThreadSnapshot& birth,
    410                const DeathDataSnapshot& death_data,
    411                const std::string& death_thread_name);
    412   ~TaskSnapshot();
    413 
    414   BirthOnThreadSnapshot birth;
    415   // Delta between death data for a thread for a certain profiling phase and the
     416   // snapshot for the previous phase, if any.  Otherwise, just a snapshot.
    417   DeathDataSnapshot death_data;
    418   std::string death_thread_name;
    419 };
    420 
    421 //------------------------------------------------------------------------------
    422 // For each thread, we have a ThreadData that stores all tracking info generated
    423 // on this thread.  This prevents the need for locking as data accumulates.
     424 // We use ThreadLocalStorage to quickly identify the current ThreadData context.
    425 // We also have a linked list of ThreadData instances, and that list is used to
    426 // harvest data from all existing instances.
    427 
    428 struct ProcessDataPhaseSnapshot;
    429 struct ProcessDataSnapshot;
    430 class BASE_EXPORT TaskStopwatch;
    431 
    432 // Map from profiling phase number to the process-wide snapshotted
    433 // representation of the list of ThreadData objects that died during the given
    434 // phase.
    435 typedef std::map<int, ProcessDataPhaseSnapshot> PhasedProcessDataSnapshotMap;
    436 
    437 class BASE_EXPORT ThreadData {
    438  public:
    439   // Current allowable states of the tracking system.  The states can vary
    440   // between ACTIVE and DEACTIVATED, but can never go back to UNINITIALIZED.
    441   enum Status {
    442     UNINITIALIZED,         // Pristine, link-time state before running.
    443     DORMANT_DURING_TESTS,  // Only used during testing.
    444     DEACTIVATED,           // No longer recording profiling.
    445     PROFILING_ACTIVE,      // Recording profiles.
    446     STATUS_LAST = PROFILING_ACTIVE
    447   };
    448 
    449   typedef base::hash_map<Location, Births*, Location::Hash> BirthMap;
    450   typedef std::map<const Births*, DeathData> DeathMap;
    451 
    452   // Initialize the current thread context with a new instance of ThreadData.
    453   // This is used by all threads that have names, and should be explicitly
    454   // set *before* any births on the threads have taken place.  It is generally
    455   // only used by the message loop, which has a well defined thread name.
    456   static void InitializeThreadContext(const std::string& suggested_name);
    457 
    458   // Using Thread Local Store, find the current instance for collecting data.
    459   // If an instance does not exist, construct one (and remember it for use on
     460   // this thread).
    461   // This may return NULL if the system is disabled for any reason.
    462   static ThreadData* Get();
    463 
    464   // Fills |process_data_snapshot| with phased snapshots of all profiling
    465   // phases, including the current one, identified by |current_profiling_phase|.
    466   // |current_profiling_phase| is necessary because a child process can start
    467   // after several phase-changing events, so it needs to receive the current
    468   // phase number from the browser process to fill the correct entry for the
    469   // current phase in the |process_data_snapshot| map.
    470   static void Snapshot(int current_profiling_phase,
    471                        ProcessDataSnapshot* process_data_snapshot);
    472 
    473   // Called when the current profiling phase, identified by |profiling_phase|,
    474   // ends.
    475   // |profiling_phase| is necessary because a child process can start after
    476   // several phase-changing events, so it needs to receive the phase number from
    477   // the browser process to fill the correct entry in the
    478   // completed_phases_snapshots_ map.
    479   static void OnProfilingPhaseCompleted(int profiling_phase);
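
  // A hedged usage sketch; the phase number and local variable are
  // illustrative:
  //
  //   tracked_objects::ProcessDataSnapshot snapshot;
  //   tracked_objects::ThreadData::Snapshot(/* current_profiling_phase */ 1,
  //                                         &snapshot);
  //   // ... serialize or display |snapshot| ...
  //   tracked_objects::ThreadData::OnProfilingPhaseCompleted(1);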
    480 
    481   // Finds (or creates) a place to count births from the given location in this
    482   // thread, and increment that tally.
     483   // TallyABirthIfActive will return NULL if the birth cannot be tallied.
    484   static Births* TallyABirthIfActive(const Location& location);
    485 
    486   // Records the end of a timed run of an object.  The |completed_task| contains
    487   // a pointer to a Births, the time_posted, and a delayed_start_time if any.
    488   // The |start_of_run| indicates when we started to perform the run of the
    489   // task.  The delayed_start_time is non-null for tasks that were posted as
    490   // delayed tasks, and it indicates when the task should have run (i.e., when
     491   // it should have posted out of the timer queue, and into the work queue).
    492   // The |end_of_run| was just obtained by a call to Now() (just after the task
    493   // finished).  It is provided as an argument to help with testing.
    494   static void TallyRunOnNamedThreadIfTracking(
    495       const base::TrackingInfo& completed_task,
    496       const TaskStopwatch& stopwatch);
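
  // A hedged sketch of the expected call site in a message loop, assuming
  // |pending_task| is the loop's PendingTask (which is a base::TrackingInfo):
  //
  //   tracked_objects::TaskStopwatch stopwatch;
  //   stopwatch.Start();
  //   pending_task.task.Run();
  //   stopwatch.Stop();
  //   tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
  //       pending_task, stopwatch);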
    497 
     498   // Record the end of a timed run of an object.  The |births| is the record for
    499   // the instance, the |time_posted| records that instant, which is presumed to
    500   // be when the task was posted into a queue to run on a worker thread.
    501   // The |start_of_run| is when the worker thread started to perform the run of
    502   // the task.
    503   // The |end_of_run| was just obtained by a call to Now() (just after the task
    504   // finished).
    505   static void TallyRunOnWorkerThreadIfTracking(const Births* births,
    506                                                const TrackedTime& time_posted,
    507                                                const TaskStopwatch& stopwatch);
    508 
    509   // Record the end of execution in region, generally corresponding to a scope
    510   // being exited.
    511   static void TallyRunInAScopedRegionIfTracking(const Births* births,
    512                                                 const TaskStopwatch& stopwatch);
    513 
    514   const std::string& thread_name() const { return thread_name_; }
    515 
    516   // Initializes all statics if needed (this initialization call should be made
    517   // while we are single threaded).
    518   static void Initialize();
    519 
     520   // Initializes the statics if needed, then sets the internal status_ to
     521   // |status| (e.g., DEACTIVATED to stop recording, or PROFILING_ACTIVE to
     522   // record profiles).
    523   static void InitializeAndSetTrackingStatus(Status status);
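
  // For example, to turn profiling on early in a process (a hedged sketch):
  //
  //   tracked_objects::ThreadData::InitializeAndSetTrackingStatus(
  //       tracked_objects::ThreadData::PROFILING_ACTIVE);
  //   DCHECK(tracked_objects::ThreadData::TrackingStatus());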
    524 
    525   static Status status();
    526 
    527   // Indicate if any sort of profiling is being done (i.e., we are more than
    528   // DEACTIVATED).
    529   static bool TrackingStatus();
    530 
    531   // Enables profiler timing.
    532   static void EnableProfilerTiming();
    533 
    534   // Provide a time function that does nothing (runs fast) when we don't have
    535   // the profiler enabled.  It will generally be optimized away when it is
    536   // ifdef'ed to be small enough (allowing the profiler to be "compiled out" of
    537   // the code).
    538   static TrackedTime Now();
    539 
    540   // Use the function |now| to provide current times, instead of calling the
    541   // TrackedTime::Now() function.  Since this alternate function is being used,
    542   // the other time arguments (used for calculating queueing delay) will be
    543   // ignored.
    544   static void SetAlternateTimeSource(NowFunction* now);
    545 
    546   // This function can be called at process termination to validate that thread
    547   // cleanup routines have been called for at least some number of named
    548   // threads.
    549   static void EnsureCleanupWasCalled(int major_threads_shutdown_count);
    550 
    551  private:
    552   friend class TaskStopwatch;
    553   // Allow only tests to call ShutdownSingleThreadedCleanup.  We NEVER call it
    554   // in production code.
    555   // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
     556   // better chance of optimizing (inlining? etc.) private methods (knowing that
    557   // there will be no need for an external entry point).
    558   friend class TrackedObjectsTest;
    559   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
    560   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
    561 
    562   typedef std::map<const BirthOnThread*, int> BirthCountMap;
    563 
    564   typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
    565       DeathsSnapshot;
    566 
    567   // Worker thread construction creates a name since there is none.
    568   explicit ThreadData(int thread_number);
    569 
    570   // Message loop based construction should provide a name.
    571   explicit ThreadData(const std::string& suggested_name);
    572 
    573   ~ThreadData();
    574 
    575   // Push this instance to the head of all_thread_data_list_head_, linking it to
    576   // the previous head.  This is performed after each construction, and leaves
    577   // the instance permanently on that list.
    578   void PushToHeadOfList();
    579 
    580   // (Thread safe) Get start of list of all ThreadData instances using the lock.
    581   static ThreadData* first();
    582 
    583   // Iterate through the null terminated list of ThreadData instances.
    584   ThreadData* next() const;
    585 
    586 
    587   // In this thread's data, record a new birth.
    588   Births* TallyABirth(const Location& location);
    589 
    590   // Find a place to record a death on this thread.
    591   void TallyADeath(const Births& births,
    592                    int32_t queue_duration,
    593                    const TaskStopwatch& stopwatch);
    594 
    595   // Snapshots (under a lock) the profiled data for the tasks for this thread
    596   // and writes all of the executed tasks' data -- i.e. the data for all
    597   // profiling phases (including the current one: |current_profiling_phase|) for
     598   // the tasks with entries in the death_map_ -- into |phased_snapshots|.
    599   // Also updates the |birth_counts| tally for each task to keep track of the
    600   // number of living instances of the task -- that is, each task maps to the
    601   // number of births for the task that have not yet been balanced by a death.
    602   void SnapshotExecutedTasks(int current_profiling_phase,
    603                              PhasedProcessDataSnapshotMap* phased_snapshots,
    604                              BirthCountMap* birth_counts);
    605 
    606   // Using our lock, make a copy of the specified maps.  This call may be made
     607   // non-local threads, which necessitates the use of the lock to prevent
    608   // the map(s) from being reallocated while they are copied.
    609   void SnapshotMaps(int profiling_phase,
    610                     BirthMap* birth_map,
    611                     DeathsSnapshot* deaths);
    612 
    613   // Called for this thread when the current profiling phase, identified by
    614   // |profiling_phase|, ends.
    615   void OnProfilingPhaseCompletedOnThread(int profiling_phase);
    616 
    617   // This method is called by the TLS system when a thread terminates.
    618   // The argument may be NULL if this thread has never tracked a birth or death.
    619   static void OnThreadTermination(void* thread_data);
    620 
    621   // This method should be called when a worker thread terminates, so that we
    622   // can save all the thread data into a cache of reusable ThreadData instances.
    623   void OnThreadTerminationCleanup();
    624 
    625   // Cleans up data structures, and returns statics to near pristine (mostly
    626   // uninitialized) state.  If there is any chance that other threads are still
    627   // using the data structures, then the |leak| argument should be passed in as
    628   // true, and the data structures (birth maps, death maps, ThreadData
     629   // instances, etc.) will be leaked and not deleted.  If you have joined all
    630   // threads since the time that InitializeAndSetTrackingStatus() was called,
    631   // then you can pass in a |leak| value of false, and this function will
    632   // delete recursively all data structures, starting with the list of
    633   // ThreadData instances.
    634   static void ShutdownSingleThreadedCleanup(bool leak);
    635 
     636   // When non-null, this specifies an external function that supplies
     637   // monotonically increasing time values.
    638   static NowFunction* now_function_;
    639 
    640   // If true, now_function_ returns values that can be used to calculate queue
    641   // time.
    642   static bool now_function_is_time_;
    643 
    644   // We use thread local store to identify which ThreadData to interact with.
    645   static base::ThreadLocalStorage::StaticSlot tls_index_;
    646 
    647   // List of ThreadData instances for use with worker threads.  When a worker
    648   // thread is done (terminated), we push it onto this list.  When a new worker
    649   // thread is created, we first try to re-use a ThreadData instance from the
    650   // list, and if none are available, construct a new one.
    651   // This is only accessed while list_lock_ is held.
    652   static ThreadData* first_retired_worker_;
    653 
    654   // Link to the most recently created instance (starts a null terminated list).
    655   // The list is traversed by about:profiler when it needs to snapshot data.
    656   // This is only accessed while list_lock_ is held.
    657   static ThreadData* all_thread_data_list_head_;
    658 
    659   // The next available worker thread number.  This should only be accessed when
    660   // the list_lock_ is held.
    661   static int worker_thread_data_creation_count_;
    662 
    663   // The number of times TLS has called us back to cleanup a ThreadData
    664   // instance.  This is only accessed while list_lock_ is held.
    665   static int cleanup_count_;
    666 
    667   // Incarnation sequence number, indicating how many times (during unittests)
    668   // we've either transitioned out of UNINITIALIZED, or into that state.  This
    669   // value is only accessed while the list_lock_ is held.
    670   static int incarnation_counter_;
    671 
    672   // Protection for access to all_thread_data_list_head_, and to
    673   // unregistered_thread_data_pool_.  This lock is leaked at shutdown.
    674   // The lock is very infrequently used, so we can afford to just make a lazy
    675   // instance and be safe.
    676   static base::LazyInstance<base::Lock>::Leaky list_lock_;
    677 
     678   // We set status_ to DEACTIVATED when we shut down the tracking service.
    679   static base::subtle::Atomic32 status_;
    680 
    681   // Link to next instance (null terminated list).  Used to globally track all
    682   // registered instances (corresponds to all registered threads where we keep
    683   // data).
    684   ThreadData* next_;
    685 
    686   // Pointer to another ThreadData instance for a Worker-Thread that has been
    687   // retired (its thread was terminated).  This value is non-NULL only for a
    688   // retired ThreadData associated with a Worker-Thread.
    689   ThreadData* next_retired_worker_;
    690 
    691   // The name of the thread that is being recorded.  If this thread has no
    692   // message_loop, then this is a worker thread, with a sequence number postfix.
    693   std::string thread_name_;
    694 
    695   // Indicate if this is a worker thread, and the ThreadData contexts should be
    696   // stored in the unregistered_thread_data_pool_ when not in use.
    697   // Value is zero when it is not a worker thread.  Value is a positive integer
    698   // corresponding to the created thread name if it is a worker thread.
    699   int worker_thread_number_;
    700 
    701   // A map used on each thread to keep track of Births on this thread.
    702   // This map should only be accessed on the thread it was constructed on.
    703   // When a snapshot is needed, this structure can be locked in place for the
    704   // duration of the snapshotting activity.
    705   BirthMap birth_map_;
    706 
     707   // Similar to birth_map_, this records information about deaths of tracked
    708   // instances (i.e., when a tracked instance was destroyed on this thread).
    709   // It is locked before changing, and hence other threads may access it by
    710   // locking before reading it.
    711   DeathMap death_map_;
    712 
    713   // Lock to protect *some* access to BirthMap and DeathMap.  The maps are
    714   // regularly read and written on this thread, but may only be read from other
    715   // threads.  To support this, we acquire this lock if we are writing from this
    716   // thread, or reading from another thread.  For reading from this thread we
    717   // don't need a lock, as there is no potential for a conflict since the
    718   // writing is only done from this thread.
    719   mutable base::Lock map_lock_;
    720 
     721   // A random number that we use to decide which sample to keep as a
    722   // representative sample in each DeathData instance.  We can't start off with
    723   // much randomness (because we can't call RandInt() on all our threads), so
    724   // we stir in more and more as we go.
    725   uint32_t random_number_;
    726 
    727   // Record of what the incarnation_counter_ was when this instance was created.
    728   // If the incarnation_counter_ has changed, then we avoid pushing into the
    729   // pool (this is only critical in tests which go through multiple
    730   // incarnations).
    731   int incarnation_count_for_pool_;
    732 
    733   // Most recently started (i.e. most nested) stopwatch on the current thread,
    734   // if it exists; NULL otherwise.
    735   TaskStopwatch* current_stopwatch_;
    736 
    737   DISALLOW_COPY_AND_ASSIGN(ThreadData);
    738 };
    739 
    740 //------------------------------------------------------------------------------
    741 // Stopwatch to measure task run time or simply create a time interval that will
    742 // be subtracted from the current most nested task's run time.  Stopwatches
    743 // coordinate with the stopwatches in which they are nested to avoid
     744 // double-counting nested tasks' run times.
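//
// For instance (a hedged sketch; the work functions and timings are
// illustrative): if an outer stopwatch spans 50 ms of wallclock time and a
// stopwatch nested within it, started and stopped on the same thread, covers
// 20 ms of that span, then the outer stopwatch reports a RunDurationMs() of
// roughly 30 while the nested one reports roughly 20, so the nested time is
// counted only once:
//
//   tracked_objects::TaskStopwatch outer;
//   outer.Start();
//   {
//     tracked_objects::TaskStopwatch nested;
//     nested.Start();
//     DoNestedWork();      // ~20 ms
//     nested.Stop();       // nested.RunDurationMs() ~= 20
//   }
//   DoRemainingWork();     // ~30 ms
//   outer.Stop();          // outer.RunDurationMs() ~= 30, not 50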
    745 
    746 class BASE_EXPORT TaskStopwatch {
    747  public:
    748   // Starts the stopwatch.
    749   TaskStopwatch();
    750   ~TaskStopwatch();
    751 
    752   // Starts stopwatch.
    753   void Start();
    754 
    755   // Stops stopwatch.
    756   void Stop();
    757 
    758   // Returns the start time.
    759   TrackedTime StartTime() const;
    760 
    761   // Task's duration is calculated as the wallclock duration between starting
    762   // and stopping this stopwatch, minus the wallclock durations of any other
    763   // instances that are immediately nested in this one, started and stopped on
    764   // this thread during that period.
    765   int32_t RunDurationMs() const;
    766 
    767   // Returns tracking info for the current thread.
    768   ThreadData* GetThreadData() const;
    769 
    770  private:
    771   // Time when the stopwatch was started.
    772   TrackedTime start_time_;
    773 
    774   // Wallclock duration of the task.
    775   int32_t wallclock_duration_ms_;
    776 
    777   // Tracking info for the current thread.
    778   ThreadData* current_thread_data_;
    779 
    780   // Sum of wallclock durations of all stopwatches that were directly nested in
    781   // this one.
    782   int32_t excluded_duration_ms_;
    783 
    784   // Stopwatch which was running on our thread when this stopwatch was started.
     785   // That preexisting stopwatch must be adjusted to exclude the wallclock
    786   // duration of this stopwatch.
    787   TaskStopwatch* parent_;
    788 
    789 #if DCHECK_IS_ON()
     790   // State of the stopwatch.  Stopwatch is first constructed in a created
     791   // state, then is optionally started/stopped, then destructed.
    792   enum { CREATED, RUNNING, STOPPED } state_;
    793 
    794   // Currently running stopwatch that is directly nested in this one, if such
    795   // stopwatch exists.  NULL otherwise.
    796   TaskStopwatch* child_;
    797 #endif
    798 };
    799 
    800 //------------------------------------------------------------------------------
    801 // A snapshotted representation of the list of ThreadData objects for a process,
    802 // for a single profiling phase.
    803 
    804 struct BASE_EXPORT ProcessDataPhaseSnapshot {
    805  public:
    806   ProcessDataPhaseSnapshot();
    807   ~ProcessDataPhaseSnapshot();
    808 
    809   std::vector<TaskSnapshot> tasks;
    810 };
    811 
    812 //------------------------------------------------------------------------------
    813 // A snapshotted representation of the list of ThreadData objects for a process,
    814 // for all profiling phases, including the current one.
    815 
    816 struct BASE_EXPORT ProcessDataSnapshot {
    817  public:
    818   ProcessDataSnapshot();
    819   ~ProcessDataSnapshot();
    820 
    821   PhasedProcessDataSnapshotMap phased_snapshots;
    822   base::ProcessId process_id;
    823 };
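
// A hedged sketch of walking a filled-in snapshot; the variable names are
// illustrative:
//
//   tracked_objects::ProcessDataSnapshot process_data;
//   tracked_objects::ThreadData::Snapshot(current_phase, &process_data);
//   for (const auto& phase_pair : process_data.phased_snapshots) {
//     int phase = phase_pair.first;
//     const tracked_objects::ProcessDataPhaseSnapshot& phase_data =
//         phase_pair.second;
//     for (const tracked_objects::TaskSnapshot& task : phase_data.tasks) {
//       // task.birth.location, task.death_thread_name, and
//       // task.death_data.count are now frozen and safe to read.
//     }
//   }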
    824 
    825 }  // namespace tracked_objects
    826 
    827 #endif  // BASE_TRACKED_OBJECTS_H_
    828