xref: /aosp_15_r20/external/cronet/base/trace_event/memory_dump_manager.cc (revision 6777b5387eb2ff775bb5750e3f5d96f37fb7352b)
// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <memory>
#include <tuple>
#include <utility>

#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "third_party/abseil-cpp/absl/base/dynamic_annotations.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"

#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#endif  // BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/trace_event/address_space_dump_provider.h"
#endif
namespace base {
namespace trace_event {

namespace {

MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;

// Temporary trampoline (until the scheduler is moved out of this file) that
// adapts the |request_dump_function| passed to Initialize() to the callback
// signature expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  global_dump_fn.Run(dump_type, level_of_detail);
}

}  // namespace

// static
constexpr const char* MemoryDumpManager::kTraceCategory;

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif
// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_memory_dump_manager_for_testing)
    return g_memory_dump_manager_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
  DCHECK(!g_memory_dump_manager_for_testing);
  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
  g_memory_dump_manager_for_testing = instance.get();
  return instance;
}
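// Illustrative sketch (not part of this file): a test can swap in its own
// instance so that GetInstance() callers observe it for the instance's
// lifetime. The fixture name below is hypothetical.
//
//   class MemoryDumpManagerTest : public testing::Test {
//    protected:
//     void SetUp() override {
//       mdm_ = MemoryDumpManager::CreateInstanceForTesting();
//     }
//     // While |mdm_| is alive, MemoryDumpManager::GetInstance() returns it.
//     std::unique_ptr<MemoryDumpManager> mdm_;
//   };
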
MemoryDumpManager::MemoryDumpManager() = default;

MemoryDumpManager::~MemoryDumpManager() {
  Thread* dump_thread = nullptr;
  {
    AutoLock lock(lock_);
    if (dump_thread_) {
      dump_thread = dump_thread_.get();
    }
  }
  if (dump_thread) {
    dump_thread->Stop();
  }
  AutoLock lock(lock_);
  dump_thread_.reset();
  g_memory_dump_manager_for_testing = nullptr;
}
void MemoryDumpManager::Initialize(
    RequestGlobalDumpFunction request_dump_function,
    bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(!request_dump_function.is_null());
    DCHECK(!can_request_global_dumps());
    request_dump_function_ = request_dump_function;
    is_coordinator_ = is_coordinator;
  }

// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  RegisterDumpProvider(AddressSpaceDumpProvider::GetInstance(),
                       "PartitionAlloc.AddressSpace", nullptr);
#endif

#if BUILDFLAG(IS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif
}
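// Illustrative sketch (not part of this file): how an embedder might wire
// Initialize() to a function that forwards global-dump requests to its own
// coordinator. |RequestDumpFromService| is a hypothetical free function whose
// signature matches RequestGlobalDumpFunction.
//
//   void RequestDumpFromService(MemoryDumpType dump_type,
//                               MemoryDumpLevelOfDetail level_of_detail) {
//     // Forward the request to the embedder's global coordinator
//     // (e.g. over IPC to the service that fans out to all processes).
//   }
//
//   MemoryDumpManager::GetInstance()->Initialize(
//       BindRepeating(&RequestDumpFromService), /*is_coordinator=*/true);
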
void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
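// Illustrative sketch (not part of this file): a minimal provider and its
// registration. |FooDumpProvider| and the "foo" dump names are hypothetical.
//
//   class FooDumpProvider : public MemoryDumpProvider {
//    public:
//     FooDumpProvider() {
//       // Bind OnMemoryDump() to the thread this object lives on.
//       MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//           this, "Foo", SingleThreadTaskRunner::GetCurrentDefault());
//     }
//     bool OnMemoryDump(const MemoryDumpArgs& args,
//                       ProcessMemoryDump* pmd) override {
//       // Report one allocator dump with its total size in bytes.
//       MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("foo/main");
//       dump->AddScalar(MemoryAllocatorDump::kNameSize,
//                       MemoryAllocatorDump::kUnitsBytes, 42u);
//       return true;  // The dump succeeded.
//     }
//   };
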
void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
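// Illustrative sketch (not part of this file): registering a provider that
// must be invoked on a specific sequence. |bar_provider| and
// |bar_task_runner| are hypothetical.
//
//   MemoryDumpProvider::Options options;
//   MemoryDumpManager::GetInstance()
//       ->RegisterDumpProviderWithSequencedTaskRunner(
//           bar_provider, "Bar", bar_task_runner, options);
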
void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  // Only a handful of MDPs are required to compute the memory metrics. These
  // have small enough performance overhead that it is reasonable to run them
  // in the background while the user is doing other things. Those MDPs are
  // 'allowed in background mode'.
  bool allowed_in_background_mode = IsMemoryDumpProviderInAllowlist(name);

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
      mdp, name, std::move(task_runner), options, allowed_in_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }
}
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}
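// Illustrative sketch (not part of this file): transferring ownership so the
// provider is deleted only after any in-flight dump has finished with it.
// |owned_provider| is a hypothetical std::unique_ptr<MemoryDumpProvider>.
//
//   MemoryDumpManager::GetInstance()->UnregisterAndDeleteDumpProviderSoon(
//       std::move(owned_provider));
//   // |owned_provider| is now null; the manager deletes the provider when
//   // the last reference to its MDPInfo goes away.
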
void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted whenever the MDPInfo struct is, that is either:
    // - At the end of this function, if no dump is in progress.
    // - In ContinueAsyncProcessDump() when MDPInfo is removed from
    //   |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on the same task runner, so that the MDP cannot
    //   unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled, which causes InvokeOnMemoryDump()
  // to just skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}
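// Illustrative sketch (not part of this file): the safe, non-racy way for a
// sequence-bound provider to unregister is from the same sequence it was
// registered on. |FooDumpProvider| is the hypothetical provider above.
//
//   FooDumpProvider::~FooDumpProvider() {
//     // Runs on the registered task runner, so it cannot race with
//     // OnMemoryDump().
//     MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
//   }
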
bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

void MemoryDumpManager::ResetForTesting() {
  AutoLock lock(lock_);
  request_dump_function_.Reset();
  dump_providers_.clear();
}
scoped_refptr<SequencedTaskRunner>
MemoryDumpManager::GetDumpThreadTaskRunner() {
  base::AutoLock lock(lock_);
  return GetOrCreateBgTaskRunnerLocked();
}

scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
  if (dump_thread_)
    return dump_thread_->task_runner();

  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
  bool started = dump_thread_->Start();
  CHECK(started);

  return dump_thread_->task_runner();
}
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          ProcessMemoryDumpCallback callback) {
  char guid_str[20];
  snprintf(guid_str, std::size(guid_str), "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));

  // If the argument filter is enabled, only background-mode dumps should be
  // allowed. If the trace config passed for a background tracing session is
  // missing the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::kBackground, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    pmd_async_state = std::make_unique<ProcessMemoryDumpAsyncState>(
        args, dump_providers_, std::move(callback),
        GetOrCreateBgTaskRunnerLocked());
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  ContinueAsyncProcessDump(pmd_async_state.release());
}
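// Illustrative sketch (not part of this file): requesting a detailed dump of
// the current process. |OnDumpDone| is a hypothetical completion handler, and
// the aggregate initialization of MemoryDumpRequestArgs assumes the field
// order {dump_guid, dump_type, level_of_detail}.
//
//   void OnDumpDone(bool success, uint64_t dump_guid,
//                   std::unique_ptr<ProcessMemoryDump> pmd) {
//     // On success, inspect the allocator dumps collected in |pmd|.
//   }
//
//   MemoryDumpRequestArgs args{/*dump_guid=*/1,
//                              MemoryDumpType::kExplicitlyTriggered,
//                              MemoryDumpLevelOfDetail::kDetailed};
//   MemoryDumpManager::GetInstance()->CreateProcessDump(
//       args, BindOnce(&OnDumpDone));
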
// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, switches to that task runner and continues.
// All OnMemoryDump() invocations are linearized. |lock_| is used in these
// functions purely to ensure consistency w.r.t. (un)registrations of
// |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
  // why it isn't is the corner-case logic of |did_post_task| below, which
  // needs to take back ownership of the |pmd_async_state| when the PostTask()
  // fails. Unfortunately, PostTask() destroys the unique_ptr arguments upon
  // failure to prevent accidental leaks. Using a unique_ptr would prevent us
  // from skipping the hop and moving on. Hence the manual naked -> unique ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  while (!pmd_async_state->pending_dump_providers.empty()) {
    // Read the MemoryDumpProviderInfo thread-safety considerations in
    // memory_dump_manager.h when accessing |mdpinfo| fields.
    MemoryDumpProviderInfo* mdpinfo =
        pmd_async_state->pending_dump_providers.back().get();

    // If we are in background mode, we should invoke only the allowed
    // providers. Ignore other providers and continue.
    if (pmd_async_state->req_args.level_of_detail ==
            MemoryDumpLevelOfDetail::kBackground &&
        !mdpinfo->allowed_in_background_mode) {
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    // If the dump provider did not specify a task runner affinity, dump on
    // |dump_thread_|.
    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
    if (!task_runner) {
      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
      task_runner = pmd_async_state->dump_thread_task_runner;
      DCHECK(task_runner);
    }

    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
    // required since we are on the right SequencedTaskRunner.
    if (task_runner->RunsTasksInCurrentSequence()) {
      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    bool did_post_task = task_runner->PostTask(
        FROM_HERE,
        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
                 Unretained(pmd_async_state.get())));

    if (did_post_task) {
      // Ownership is transferred to the posted task.
      std::ignore = pmd_async_state.release();
      return;
    }

    // PostTask usually fails only if the process or thread is shut down, so
    // the dump provider is disabled here. However, don't disable unbound dump
    // providers, since the |dump_thread_| is controlled by MDM.
    if (mdpinfo->task_runner) {
      // A locked access is required to R/W |disabled| (for the
      // UnregisterAndDeleteDumpProviderSoon() case).
      AutoLock lock(lock_);
      mdpinfo->disabled = true;
    }

    // PostTask failed. Ignore the dump provider and continue.
    pmd_async_state->pending_dump_providers.pop_back();
  }

  FinishAsyncProcessDump(std::move(pmd_async_state));
}
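// Illustrative sketch (not part of this file) of the raw-pointer/unique_ptr
// handoff used above, in isolation. |State|, |Continue| and |task_runner| are
// hypothetical.
//
//   void Continue(State* owned_state) {
//     auto state = WrapUnique(owned_state);  // Take ownership back.
//     if (task_runner->PostTask(
//             FROM_HERE, BindOnce(&Continue, Unretained(state.get())))) {
//       std::ignore = state.release();  // The posted task now owns |state|.
//       return;
//     }
//     // PostTask failed: |state| is still owned here, so the work can be
//     // finished inline instead of leaking or double-deleting it.
//   }
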
// This function is called on the right task runner for the current MDP: either
// the task runner specified by the MDP, or |dump_thread_task_runner| if the
// MDP did not specify one. Invokes the dump provider's OnMemoryDump()
// (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
                                           ProcessMemoryDump* pmd) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksInCurrentSequence());

  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
               "dump_provider.name", mdpinfo->name);

  // Do not add any other TRACE_EVENT macro (or function that might have them)
  // below this point. Under some rare circumstances, they can re-initialize
  // and invalidate the current ThreadLocalEventBuffer MDP, making the
  // |disabled| check below susceptible to TOCTTOU bugs
  // (https://crbug.com/763365).

  bool is_thread_bound;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                  << "\". Dump failed multiple times consecutively.";
    }
    if (mdpinfo->disabled)
      return;

    is_thread_bound = mdpinfo->task_runner != nullptr;
  }  // AutoLock lock(lock_);

  // Invoke the dump provider.

  // A stack-allocated string with the dump provider's name is useful for
  // debugging crashes that occur when a dump is invoked after a
  // |dump_provider| was not unregistered in a safe way.
  char provider_name_for_debugging[16];
  strncpy(provider_name_for_debugging, mdpinfo->name,
          sizeof(provider_name_for_debugging) - 1);
  provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
  base::debug::Alias(provider_name_for_debugging);

  ABSL_ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
  CHECK(!is_thread_bound ||
        !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
  bool dump_successful =
      mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
  mdpinfo->consecutive_failures =
      dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}
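// Illustrative sketch (not part of this file): a provider that reports
// failure from OnMemoryDump(). After kMaxConsecutiveFailuresCount (3)
// consecutive failures, the manager disables it as shown above.
// |BarDumpProvider| and its members are hypothetical.
//
//   bool BarDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
//                                      ProcessMemoryDump* pmd) {
//     if (!snapshot_available_)
//       return false;  // Counts toward |consecutive_failures|.
//     pmd->CreateAllocatorDump("bar")->AddScalar(
//         MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
//         snapshot_size_);
//     return true;  // Resets |consecutive_failures| to 0.
//   }
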
void MemoryDumpManager::FinishAsyncProcessDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
                            Unretained(this), std::move(pmd_async_state)));
    return;
  }

  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");

  if (!pmd_async_state->callback.is_null()) {
    std::move(pmd_async_state->callback)
        .Run(true /* success */, dump_guid,
             std::move(pmd_async_state->process_memory_dump));
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}
void MemoryDumpManager::SetupForTracing(
    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
  AutoLock lock(lock_);

  // At this point we must have the ability to request global dumps.
  DCHECK(can_request_global_dumps());

  MemoryDumpScheduler::Config periodic_config;
  for (const auto& trigger : memory_dump_config.triggers) {
    if (trigger.trigger_type == MemoryDumpType::kPeriodicInterval) {
      if (periodic_config.triggers.empty()) {
        periodic_config.callback =
            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
                          MemoryDumpType::kPeriodicInterval);
      }
      periodic_config.triggers.push_back(
          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
    }
  }

  // Only the coordinator process triggers periodic memory dumps.
  if (is_coordinator_ && !periodic_config.triggers.empty()) {
    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                              GetOrCreateBgTaskRunnerLocked());
  }
}
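// Illustrative sketch (not part of this file): a JSON trace config whose
// memory_dump_config would yield two periodic triggers here. The interval
// values are example numbers.
//
//   {
//     "included_categories": ["disabled-by-default-memory-infra"],
//     "memory_dump_config": {
//       "triggers": [
//         {"mode": "light", "periodic_interval_ms": 250},
//         {"mode": "detailed", "periodic_interval_ms": 2000}
//       ]
//     }
//   }
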
void MemoryDumpManager::TeardownForTracing() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  AutoLock lock(lock_);

  MemoryDumpScheduler::GetInstance()->Stop();
}
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    ProcessMemoryDumpCallback callback,
    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      callback(std::move(callback)),
      callback_task_runner(SingleThreadTaskRunner::GetCurrentDefault()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
  MemoryDumpArgs args = {req_args.level_of_detail, req_args.determinism,
                         req_args.dump_guid};
  process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
    default;

}  // namespace trace_event
}  // namespace base