// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <algorithm>
#include <limits>
#include <tuple>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if !defined(OS_NACL)
#include "base/trace_event/process_memory_totals_dump_provider.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/process_memory_maps_dump_provider.h"
#endif

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif

namespace base {
namespace trace_event {

namespace {

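// Argument schema shared by all memory dump trace events emitted in
// FinalizeDumpAndAddToTrace(): a single CONVERTABLE argument named "dumps"
// carrying the serialized ProcessMemoryDump.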
const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};

StaticAtomicSequenceNumber g_next_guid;
uint32_t g_periodic_dumps_count = 0;
uint32_t g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;

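// One out of every |g_heavy_dumps_rate| periodic dumps is DETAILED; all the
// others are LIGHT. A rate of 0 means that no DETAILED dumps are requested.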
void RequestPeriodicGlobalDump() {
  MemoryDumpLevelOfDetail level_of_detail;
  if (g_heavy_dumps_rate == 0) {
    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
  } else {
    level_of_detail = g_periodic_dumps_count == 0
                          ? MemoryDumpLevelOfDetail::DETAILED
                          : MemoryDumpLevelOfDetail::LIGHT;

    if (++g_periodic_dumps_count == g_heavy_dumps_rate)
      g_periodic_dumps_count = 0;
  }

  MemoryDumpManager::GetInstance()->RequestGlobalDump(
      MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}

// Callback wrapper invoked upon completion of RequestGlobalDump(); it injects
// the closing trace marker before running the caller's callback.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
                      uint64_t dump_guid,
                      bool success) {
  TRACE_EVENT_NESTABLE_ASYNC_END1(
      MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
      TRACE_ID_MANGLE(dump_guid), "success", success);

  if (!wrapped_callback.is_null()) {
    wrapped_callback.Run(dump_guid, success);
    wrapped_callback.Reset();
  }
}

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#elif defined(OS_WIN)
    WinHeapDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : delegate_(nullptr),
      is_coordinator_(false),
      memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      dumper_registrations_ignored_for_testing_(false) {
  g_next_guid.GetNext();  // Make sure that the first guid is not zero.

  heap_profiling_enabled_ = CommandLine::InitializedForCurrentProcess()
                                ? CommandLine::ForCurrentProcess()->HasSwitch(
                                      switches::kEnableHeapProfiling)
                                : false;

  if (heap_profiling_enabled_)
    AllocationContextTracker::SetCaptureEnabled(true);
}

MemoryDumpManager::~MemoryDumpManager() {
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
                                   bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(delegate);
    DCHECK(!delegate_);
    delegate_ = delegate;
    is_coordinator_ = is_coordinator;
  }

// Enable the core dump providers.
#if !defined(OS_NACL)
  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance(),
                       "ProcessMemoryTotals", nullptr);
#endif

#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance(),
                       "ProcessMemoryMaps", nullptr);
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif

#if defined(OS_WIN)
  RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
#endif

  // If tracing was enabled before initializing MemoryDumpManager, we missed
  // the OnTraceLogEnabled() event. Synthesize it so we can late-join the
  // party.
  bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
  if (is_tracing_already_enabled)
    OnTraceLogEnabled();
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
      new MemoryDumpProviderInfo(mdp, name, task_runner, options);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }

  if (heap_profiling_enabled_)
    mdp->OnHeapProfilingEnabled(true);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
  RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
}
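
// Typical registration (a sketch, not part of this file; FooDumpProvider is a
// hypothetical MemoryDumpProvider implementation):
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       FooDumpProvider::GetInstance(), "Foo", ThreadTaskRunnerHandle::Get());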

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    scoped_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  scoped_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted when the owning MDPInfo struct is, that is
    // either:
    // - At the end of this function, if no dump is in progress.
    // - In the prologue of the next ContinueAsyncProcessDump().
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a thread affinity (via task_runner()) AND
    //   the unregistration happens on the same thread (so the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call).
    // - The MDP has NOT specified a thread affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all the other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->BelongsToCurrentThread())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled, which causes ContinueAsyncProcessDump
  // to skip it without actually invoking the |mdp|, which might be destroyed
  // by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail,
    const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
    if (!callback.is_null())
      callback.Run(0u /* guid */, false /* success */);
    return;
  }

  const uint64_t guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // Create an async event to keep track of the global dump evolution.
  // The |wrapped_callback| will generate the ASYNC_END event and then invoke
  // the real |callback| provided by the caller.
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
                                    TRACE_ID_MANGLE(guid));
  MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);

  // Technically there is no need to grab the |lock_| here as the delegate is
  // long-lived and can only be set by Initialize(), which is locked and
  // necessarily happens before memory_tracing_enabled_ == true.
  // Not taking the |lock_|, though, is likely to make TSan barf and, at this
  // point (memory-infra is enabled), we are not on the fast path anymore.
  MemoryDumpManagerDelegate* delegate;
  {
    AutoLock lock(lock_);
    delegate = delegate_;
  }

  // The delegate will coordinate the IPC broadcast and at some point invoke
  // CreateProcessDump() to get a dump for the current process.
  MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
  delegate->RequestGlobalMemoryDump(args, wrapped_callback);
}

void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
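
// Example (a sketch, not part of this file): any thread can request a global
// dump; MemoryDumpType::EXPLICITLY_TRIGGERED is assumed to be the type used
// for manual, non-periodic requests:
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::EXPLICITLY_TRIGGERED,
//       MemoryDumpLevelOfDetail::DETAILED);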

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_MANGLE(args.dump_guid));

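  // Snapshot the registered dump providers under the |lock_|: the async state
  // keeps its own |pending_dump_providers| list, so (un)registrations that
  // happen while the dump is in flight do not affect this dump.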
  scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);
    pmd_async_state.reset(
        new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_,
                                        callback, dump_thread_->task_runner()));
  }

  TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                         TRACE_ID_MANGLE(args.dump_guid),
                         TRACE_EVENT_FLAG_FLOW_OUT);

  // Start the thread hop. |dump_providers_| are kept sorted by thread, so
  // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
  // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
  ContinueAsyncProcessDump(pmd_async_state.release());
}

// At most one ContinueAsyncProcessDump() can be active at any time for a given
// PMD, regardless of the status of the |lock_|. |lock_| is used here purely to
// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
// The linearization of dump providers' OnMemoryDump invocations is achieved by
// means of subsequent PostTask(s).
//
// 1) Prologue:
//   - If this was the last hop, create a trace event, add it to the trace
//     and finalize (invoke callback).
//   - Check if we are on the right thread. If not, hop and continue there.
//   - Check if the dump provider is disabled; if so, skip the dump.
// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
// 3) Epilogue:
//   - Unregister the dump provider if it failed too many times consecutively.
//   - Pop() the MDP from the |pending_dump_providers| list, possibly
//     destroying the MDPInfo if it was unregistered in the meantime.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
  // it isn't is the corner-case logic around |post_task_failed| below, which
  // needs to take back ownership of the |pmd_async_state| when a thread goes
  // away and the PostTask() consequently fails.
  // Unfortunately, PostTask() destroys its scoped_ptr arguments upon failure
  // to prevent accidental leaks. Using a scoped_ptr would prevent us from
  // skipping the hop and moving on. Hence the manual naked -> scoped ptr
  // juggling.
  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  if (pmd_async_state->pending_dump_providers.empty())
    return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));

  // Read MemoryDumpProviderInfo thread safety considerations in
  // memory_dump_manager.h when accessing |mdpinfo| fields.
  MemoryDumpProviderInfo* mdpinfo =
      pmd_async_state->pending_dump_providers.back().get();

  // If the dump provider did not specify a thread affinity, dump on
  // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
  // point (if tracing was disabled in the meanwhile). In such a case the
  // PostTask() below will fail, but |task_runner| should always be non-null.
  SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
  if (!task_runner)
    task_runner = pmd_async_state->dump_thread_task_runner.get();

  bool post_task_failed = false;
  if (!task_runner->BelongsToCurrentThread()) {
    // It's time to hop onto another thread.
    post_task_failed = !task_runner->PostTask(
        FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
                        Unretained(this), Unretained(pmd_async_state.get())));
    if (!post_task_failed) {
      // Ownership is transferred to the next ContinueAsyncProcessDump().
      ignore_result(pmd_async_state.release());
      return;
    }
  }

  // At this point either:
  // - The MDP has a task runner affinity and we are on the right thread.
  // - The MDP has a task runner affinity but the underlying thread is gone,
  //   hence the above |post_task_failed| == true.
  // - The MDP does NOT have a task runner affinity. A locked access is required
  //   to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case).
  bool should_dump;
  const char* disabled_reason = nullptr;
  {
    AutoLock lock(lock_);
    if (!mdpinfo->disabled) {
      if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
        mdpinfo->disabled = true;
        disabled_reason =
            "Dump failure, possibly related to sandboxing (crbug.com/461788)."
            " Try --no-sandbox.";
      } else if (post_task_failed) {
        disabled_reason = "The thread it was meant to dump onto is gone.";
        mdpinfo->disabled = true;
      }
    }
    should_dump = !mdpinfo->disabled;
  }

  if (disabled_reason) {
    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". "
               << disabled_reason;
  }

  if (should_dump) {
    // Invoke the dump provider.
    TRACE_EVENT_WITH_FLOW1(kTraceCategory,
                           "MemoryDumpManager::ContinueAsyncProcessDump",
                           TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
                           TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                           "dump_provider.name", mdpinfo->name);

    // Pid of the target process being dumped. Often kNullProcessId (= current
    // process), non-zero when the coordinator process creates dumps on behalf
    // of child processes (see crbug.com/461788).
    ProcessId target_pid = mdpinfo->options.target_pid;
    ProcessMemoryDump* pmd =
        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
    MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
    bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
    mdpinfo->consecutive_failures =
        dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
  }  // if (should_dump)

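  // This provider is done (or was skipped): drop it from the pending list and
  // recurse. The next iteration may hop to another thread.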
  pmd_async_state->pending_dump_providers.pop_back();
  ContinueAsyncProcessDump(pmd_async_state.release());
}

// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                        Passed(&pmd_async_state)));
    return;
  }

  TRACE_EVENT_WITH_FLOW0(kTraceCategory,
                         "MemoryDumpManager::FinalizeDumpAndAddToTrace",
                         TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);

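  // Serialize each per-process dump container into one MEMORY_DUMP trace
  // event. The event carries the target process id, so that dumps created on
  // behalf of child processes are attributed to the right process.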
  for (const auto& kv : pmd_async_state->process_dumps) {
    ProcessId pid = kv.first;  // kNullProcessId for the current process.
    ProcessMemoryDump* process_memory_dump = kv.second.get();
    TracedValue* traced_value = new TracedValue();
    scoped_refptr<ConvertableToTraceFormat> event_value(traced_value);
    process_memory_dump->AsValueInto(traced_value);
    traced_value->SetString("level_of_detail",
                            MemoryDumpLevelOfDetailToString(
                                pmd_async_state->req_args.level_of_detail));
    const char* const event_name =
        MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

    TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
        TRACE_EVENT_PHASE_MEMORY_DUMP,
        TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
        dump_guid, pid, kTraceEventNumArgs, kTraceEventArgNames,
        kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
        TRACE_EVENT_FLAG_HAS_ID);
  }

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(dump_guid, true /* success */);
    pmd_async_state->callback.Reset();
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_MANGLE(dump_guid));
}

void MemoryDumpManager::OnTraceLogEnabled() {
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
  if (!enabled)
    return;

  // Initialize the TraceLog for the current thread. This is to avoid the
  // TraceLog memory dump provider being registered lazily in the PostTask()
  // below while the |lock_| is held.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // Spin up the thread used to invoke unbound dump providers.
  scoped_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
  if (!dump_thread->Start()) {
    LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
    return;
  }

  AutoLock lock(lock_);

  DCHECK(delegate_);  // At this point we must have a delegate.

  scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr;
  scoped_refptr<TypeNameDeduplicator> type_name_deduplicator = nullptr;

  if (heap_profiling_enabled_) {
    // If heap profiling is enabled, the stack frame deduplicator and type name
    // deduplicator will be in use. Add metadata events to write the frames
    // and type IDs.
    stack_frame_deduplicator = new StackFrameDeduplicator;
    type_name_deduplicator = new TypeNameDeduplicator;
    TRACE_EVENT_API_ADD_METADATA_EVENT(
        "stackFrames", "stackFrames",
        scoped_refptr<ConvertableToTraceFormat>(stack_frame_deduplicator));
    TRACE_EVENT_API_ADD_METADATA_EVENT(
        "typeNames", "typeNames",
        scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator));
  }

  DCHECK(!dump_thread_);
  dump_thread_ = std::move(dump_thread);
  session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator,
                                              type_name_deduplicator);

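  // Flip the enabled flag only now that the dump thread and session state are
  // fully set up, so RequestGlobalDump() never sees a half-initialized state.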
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

  // TODO(primiano): This is a temporary hack to disable periodic memory dumps
  // when running memory benchmarks until telemetry uses TraceConfig to
  // enable/disable periodic dumps. See crbug.com/529184 .
  if (!is_coordinator_ ||
      CommandLine::ForCurrentProcess()->HasSwitch(
          "enable-memory-benchmarking")) {
    return;
  }

  // Enable periodic dumps. At the moment the periodic support is limited to
  // at most one low-detail periodic dump and at most one high-detail periodic
  // dump. If both are specified, the high-detail period must be an integer
  // multiple of the low-detail one.
  g_periodic_dumps_count = 0;
  const TraceConfig trace_config =
      TraceLog::GetInstance()->GetCurrentTraceConfig();
  const TraceConfig::MemoryDumpConfig& config_list =
      trace_config.memory_dump_config();
  if (config_list.empty())
    return;

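  // The timer fires at the rate of the most frequent trigger; the DETAILED
  // period (if any) is tracked separately to derive |g_heavy_dumps_rate|.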
  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
  uint32_t heavy_dump_period_ms = 0;
  DCHECK_LE(config_list.size(), 2u);
  for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
    DCHECK(config.periodic_interval_ms);
    if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
      heavy_dump_period_ms = config.periodic_interval_ms;
    min_timer_period_ms =
        std::min(min_timer_period_ms, config.periodic_interval_ms);
  }
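  // |g_heavy_dumps_rate| is the number of timer ticks between two DETAILED
  // dumps (0 if no DETAILED trigger was configured).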
  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
  g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;

  periodic_dump_timer_.Start(FROM_HERE,
                             TimeDelta::FromMilliseconds(min_timer_period_ms),
                             base::Bind(&RequestPeriodicGlobalDump));
}

void MemoryDumpManager::OnTraceLogDisabled() {
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  scoped_ptr<Thread> dump_thread;
  {
    AutoLock lock(lock_);
    dump_thread = std::move(dump_thread_);
    session_state_ = nullptr;
  }

  // Thread stops are blocking and must be performed outside of the |lock_|,
  // or they will deadlock (e.g., if ContinueAsyncProcessDump() tries to
  // acquire it).
  periodic_dump_timer_.Stop();
  if (dump_thread)
    dump_thread->Stop();
}

uint64_t MemoryDumpManager::GetTracingProcessId() const {
  return delegate_->GetTracingProcessId();
}

MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    MemoryDumpProvider* dump_provider,
    const char* name,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
    const MemoryDumpProvider::Options& options)
    : dump_provider(dump_provider),
      name(name),
      task_runner(task_runner),
      options(options),
      consecutive_failures(0),
      disabled(false) {}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}

bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
  if (!a || !b)
    return a.get() < b.get();
  // Ensure that unbound providers (task_runner == nullptr) always run last.
  // Rationale: some unbound dump providers are known to be slow; keep them
  // last to avoid skewing the timings of the other dump providers.
  return std::tie(a->task_runner, a->dump_provider) >
         std::tie(b->task_runner, b->dump_provider);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    const scoped_refptr<MemoryDumpSessionState>& session_state,
    MemoryDumpCallback callback,
    const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
    : req_args(req_args),
      session_state(session_state),
      callback(callback),
      callback_task_runner(MessageLoop::current()->task_runner()),
      dump_thread_task_runner(dump_thread_task_runner) {
  pending_dump_providers.reserve(dump_providers.size());
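  // Copy the providers in reverse order: ContinueAsyncProcessDump() consumes
  // them via back()/pop_back(), so the first element of the OrderedSet is
  // dumped first.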
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}

ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
    GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
  auto iter = process_dumps.find(pid);
  if (iter == process_dumps.end()) {
    scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state));
    iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
  }
  return iter->second.get();
}

}  // namespace trace_event
}  // namespace base
    667