// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_sampling_profiler.h"

#include <algorithm>
#include <map>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/atomicops.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/singleton.h"
#include "base/profiler/native_stack_sampler.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/timer/elapsed_timer.h"

namespace base {

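// The module index value stored in a Frame whose instruction pointer could not
// be attributed to any known module (see Frame's default constructor below).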
const size_t kUnknownModuleIndex = static_cast<size_t>(-1);

namespace {

// This value is used to initialize the WaitableEvent object. This MUST BE set
// to MANUAL for correct operation of the IsSignaled() call in Start(). See the
// comment there for why.
constexpr WaitableEvent::ResetPolicy kResetPolicy =
    WaitableEvent::ResetPolicy::MANUAL;

// This value is used when there is no collection in progress and thus no ID
// with which to reference the active collection on the SamplingThread.
const int kNullProfilerId = -1;

}  // namespace

// StackSamplingProfiler::Module ----------------------------------------------

StackSamplingProfiler::Module::Module() : base_address(0u) {}

StackSamplingProfiler::Module::Module(uintptr_t base_address,
                                      const std::string& id,
                                      const FilePath& filename)
    : base_address(base_address), id(id), filename(filename) {}

StackSamplingProfiler::Module::~Module() = default;

// StackSamplingProfiler::InternalModule --------------------------------------

StackSamplingProfiler::InternalModule::InternalModule() : is_valid(false) {}

StackSamplingProfiler::InternalModule::InternalModule(uintptr_t base_address,
                                                      const std::string& id,
                                                      const FilePath& filename)
    : base_address(base_address), id(id), filename(filename), is_valid(true) {}

StackSamplingProfiler::InternalModule::~InternalModule() = default;

// StackSamplingProfiler::Frame -----------------------------------------------

StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
                                    size_t module_index)
    : instruction_pointer(instruction_pointer), module_index(module_index) {}

StackSamplingProfiler::Frame::~Frame() = default;

StackSamplingProfiler::Frame::Frame()
    : instruction_pointer(0), module_index(kUnknownModuleIndex) {}

// StackSamplingProfiler::InternalFrame -------------------------------------

StackSamplingProfiler::InternalFrame::InternalFrame(
    uintptr_t instruction_pointer,
    InternalModule internal_module)
    : instruction_pointer(instruction_pointer),
      internal_module(std::move(internal_module)) {}

StackSamplingProfiler::InternalFrame::~InternalFrame() = default;

// StackSamplingProfiler::Sample ----------------------------------------------

StackSamplingProfiler::Sample::Sample() = default;

StackSamplingProfiler::Sample::Sample(const Sample& sample) = default;

StackSamplingProfiler::Sample::~Sample() = default;

StackSamplingProfiler::Sample::Sample(const Frame& frame) {
  frames.push_back(frame);
}

StackSamplingProfiler::Sample::Sample(const std::vector<Frame>& frames)
    : frames(frames) {}

// StackSamplingProfiler::CallStackProfile ------------------------------------

StackSamplingProfiler::CallStackProfile::CallStackProfile() = default;

StackSamplingProfiler::CallStackProfile::CallStackProfile(
    CallStackProfile&& other) = default;

StackSamplingProfiler::CallStackProfile::~CallStackProfile() = default;

StackSamplingProfiler::CallStackProfile&
StackSamplingProfiler::CallStackProfile::operator=(CallStackProfile&& other) =
    default;

StackSamplingProfiler::CallStackProfile
StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
  return CallStackProfile(*this);
}

StackSamplingProfiler::CallStackProfile::CallStackProfile(
    const CallStackProfile& other) = default;

// StackSamplingProfiler::SamplingThread --------------------------------------

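// The dedicated thread that performs all stack sampling. A single leaky
// instance services every StackSamplingProfiler: collections are added and
// removed via tasks posted to it, and it schedules its own shutdown once it
// has been idle (no active collections) for 60 seconds.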
class StackSamplingProfiler::SamplingThread : public Thread {
 public:
  class TestAPI {
   public:
    // Reset the existing sampler. This will unfortunately create the object
    // unnecessarily if it doesn't already exist but there's no way around that.
    static void Reset();

    // Disables inherent idle-shutdown behavior.
    static void DisableIdleShutdown();

    // Begins an idle shutdown as if the idle-timer had expired and waits for
    // it to execute. Since the timer would have only been started at a time
    // when the sampling thread actually was idle, this must be called only
    // when it is known that there are no active sampling threads. If
    // |simulate_intervening_add| is true then, when executed, the shutdown
    // task will believe that a new collection has been added since it was
    // posted.
    static void ShutdownAssumingIdle(bool simulate_intervening_add);

   private:
    // Calls the sampling thread's ShutdownTask and then signals an event.
    static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
                                           int add_events,
                                           WaitableEvent* event);
  };

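  // The state for one profiling collection. A CollectionContext is created by
  // StackSamplingProfiler::Start(), handed to Add(), and then owned by
  // |active_collections_| on the sampling thread until sampling completes or
  // the collection is removed.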
  struct CollectionContext {
    CollectionContext(PlatformThreadId target,
                      const SamplingParams& params,
                      WaitableEvent* finished,
                      std::unique_ptr<NativeStackSampler> sampler,
                      std::unique_ptr<ProfileBuilder> profile_builder)
        : collection_id(next_collection_id.GetNext()),
          target(target),
          params(params),
          finished(finished),
          native_sampler(std::move(sampler)),
          profile_builder(std::move(profile_builder)) {}
    ~CollectionContext() = default;

    // An identifier for this collection, used to uniquely identify the
    // collection to outside interests.
    const int collection_id;

    const PlatformThreadId target;  // ID of the thread being sampled.
    const SamplingParams params;    // Information about how to sample.
    WaitableEvent* const finished;  // Signaled when all sampling is complete.

    // Platform-specific module that does the actual sampling.
    std::unique_ptr<NativeStackSampler> native_sampler;

    // Receives the sampling data and builds a CallStackProfile.
    std::unique_ptr<ProfileBuilder> profile_builder;

    // The absolute time for the next sample.
    Time next_sample_time;

    // The time that a profile was started, for calculating the total duration.
    Time profile_start_time;

    // Counter for the number of samples recorded so far in this collection.
    int sample_count = 0;

    // Sequence number for generating new collection ids.
    static AtomicSequenceNumber next_collection_id;
  };

  // Gets the single instance of this class.
  static SamplingThread* GetInstance();

  // Adds a new CollectionContext to the thread. This can be called externally
  // from any thread. This returns a collection id that can later be used to
  // stop the sampling.
  int Add(std::unique_ptr<CollectionContext> collection);

  // Removes an active collection based on its collection id, forcing it to run
  // its callback if any data has been collected. This can be called externally
  // from any thread.
  void Remove(int collection_id);

 private:
  friend class TestAPI;
  friend struct DefaultSingletonTraits<SamplingThread>;

  // The different states in which the sampling thread can be.
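  // Transitions are: NOT_STARTED/EXITING -> RUNNING when a collection is
  // added and the thread is (re)started, RUNNING -> EXITING when the idle
  // shutdown task runs, and back to NOT_STARTED only via TestAPI::Reset().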
  enum ThreadExecutionState {
    // The thread is not running because it has never been started. It will be
    // started when a sampling request is received.
    NOT_STARTED,

    // The thread is running and processing tasks. This is the state when any
    // sampling requests are active and during the "idle" period afterward
    // before the thread is stopped.
    RUNNING,

    // Once all sampling requests have finished and the "idle" period has
    // expired, the thread will be set to this state and its shutdown
    // initiated. A call to Stop() must be made to ensure the previous thread
    // has completely exited before calling Start() and moving back to the
    // RUNNING state.
    EXITING,
  };

  SamplingThread();
  ~SamplingThread() override;

  // Get task runner that is usable from the outside.
  scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
      ThreadExecutionState* out_state);

  // Get task runner that is usable from the sampling thread itself.
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();

  // Finishes a collection. The collection's |finished| waitable event will be
  // signaled. The |collection| should already have been removed from
  // |active_collections_| by the caller, as this is needed to avoid flakiness
  // in unit tests.
  void FinishCollection(CollectionContext* collection);

  // Check if the sampling thread is idle and begin a shutdown if it is.
  void ScheduleShutdownIfIdle();

  // These methods are tasks that get posted to the internal message queue.
  void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
  void RemoveCollectionTask(int collection_id);
  void RecordSampleTask(int collection_id);
  void ShutdownTask(int add_events);

  // Thread:
  void CleanUp() override;

  // A stack-buffer used by the native sampler for its work. This buffer can
  // be re-used for multiple native sampler objects so long as the API calls
  // that take it are not called concurrently.
  std::unique_ptr<NativeStackSampler::StackBuffer> stack_buffer_;

  // A map of collection ids to collection contexts. Because this class is a
  // singleton that is never destroyed, context objects will never be destructed
  // except by explicit action. Thus, it's acceptable to pass unretained
  // pointers to these objects when posting tasks.
  std::map<int, std::unique_ptr<CollectionContext>> active_collections_;

  // State maintained about the current execution (or non-execution) of
  // the thread. This state must always be accessed while holding the
  // lock. A copy of the task-runner is maintained here for use by any
  // calling thread; this is necessary because Thread's accessor for it is
  // not itself thread-safe. The lock is also used to order calls to the
  // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
  // multiple threads may make those calls.
  Lock thread_execution_state_lock_;  // Protects all thread_execution_state_*
  ThreadExecutionState thread_execution_state_ = NOT_STARTED;
  scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_;
  bool thread_execution_state_disable_idle_shutdown_for_testing_ = false;

  // A counter that notes adds of new collection requests. It is incremented
  // when changes occur so that delayed shutdown tasks are able to detect if
  // something new has happened while they were waiting. Like all
  // "execution_state" vars, this must be accessed while holding
  // |thread_execution_state_lock_|.
  int thread_execution_state_add_events_ = 0;

  DISALLOW_COPY_AND_ASSIGN(SamplingThread);
};

// static
void StackSamplingProfiler::SamplingThread::TestAPI::Reset() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    state = sampler->thread_execution_state_;
    DCHECK(sampler->active_collections_.empty());
  }

  // Stop the thread and wait for it to exit. This has to be done by the
  // thread itself because it has taken ownership of its own lifetime.
  if (state == RUNNING) {
    ShutdownAssumingIdle(false);
    state = EXITING;
  }
  // Make sure the thread is cleaned up since state will be reset to
  // NOT_STARTED.
  if (state == EXITING)
    sampler->Stop();

  // Reset internal variables to the just-initialized state.
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_ = NOT_STARTED;
    sampler->thread_execution_state_task_runner_ = nullptr;
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
    sampler->thread_execution_state_add_events_ = 0;
  }
}

// static
void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
  }
}

// static
void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownAssumingIdle(
    bool simulate_intervening_add) {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      sampler->GetTaskRunner(&state);
  DCHECK_EQ(RUNNING, state);
  DCHECK(task_runner);

  int add_events;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    add_events = sampler->thread_execution_state_add_events_;
    if (simulate_intervening_add)
      ++sampler->thread_execution_state_add_events_;
  }

  WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
                         WaitableEvent::InitialState::NOT_SIGNALED);
  // PostTaskAndReply won't work because the thread and its associated message
  // loop may be shut down.
  task_runner->PostTask(
      FROM_HERE, BindOnce(&ShutdownTaskAndSignalEvent, Unretained(sampler),
                          add_events, Unretained(&executed)));
  executed.Wait();
}

// static
void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
    SamplingThread* sampler,
    int add_events,
    WaitableEvent* event) {
  sampler->ShutdownTask(add_events);
  event->Signal();
}

AtomicSequenceNumber StackSamplingProfiler::SamplingThread::CollectionContext::
    next_collection_id;

StackSamplingProfiler::SamplingThread::SamplingThread()
    : Thread("StackSamplingProfiler") {}

StackSamplingProfiler::SamplingThread::~SamplingThread() = default;

StackSamplingProfiler::SamplingThread*
StackSamplingProfiler::SamplingThread::GetInstance() {
  return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
}

int StackSamplingProfiler::SamplingThread::Add(
    std::unique_ptr<CollectionContext> collection) {
  // This is not to be run on the sampling thread.

  int collection_id = collection->collection_id;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      GetOrCreateTaskRunnerForAdd();

  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
                          std::move(collection)));

  return collection_id;
}

void StackSamplingProfiler::SamplingThread::Remove(int collection_id) {
  // This is not to be run on the sampling thread.

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);

  // This can fail if the thread were to exit between acquisition of the task
  // runner above and the call below. In that case, however, everything has
  // stopped so there's no need to try to stop it.
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&SamplingThread::RemoveCollectionTask,
                                 Unretained(this), collection_id));
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
  AutoLock lock(thread_execution_state_lock_);

  // The increment of the "add events" count is why this method must only be
  // called from Add().
  ++thread_execution_state_add_events_;

  if (thread_execution_state_ == RUNNING) {
    DCHECK(thread_execution_state_task_runner_);
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    return thread_execution_state_task_runner_;
  }

  if (thread_execution_state_ == EXITING) {
    // StopSoon() was previously called to shut down the thread
    // asynchronously. Stop() must now be called before calling Start() again
    // to reset the thread state.
    //
    // We must allow blocking here to satisfy the Thread implementation, but in
    // practice the Stop() call is unlikely to actually block. For this to
    // happen a new profiling request would have to be made within the narrow
    // window between StopSoon() and thread exit following the end of the 60
    // second idle period.
    ScopedAllowBlocking allow_blocking;
    Stop();
  }

  DCHECK(!stack_buffer_);
  stack_buffer_ = NativeStackSampler::CreateStackBuffer();

  // The thread is not running. Start it and get the associated task runner.
  // The task runner has to be saved for future use because, although it can be
  // used from any thread, it can be acquired via task_runner() only on the
  // created thread or the thread that creates it (i.e. this thread); that
  // restriction is relaxed in SamplingThread by gating access to it with the
  // |thread_execution_state_lock_|.
  Start();
  thread_execution_state_ = RUNNING;
  thread_execution_state_task_runner_ = Thread::task_runner();

  // Detach the sampling thread from the "sequence" (i.e. thread) that
  // started it so that it can be self-managed or stopped by another thread.
  DetachFromSequence();

  return thread_execution_state_task_runner_;
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunner(
    ThreadExecutionState* out_state) {
  AutoLock lock(thread_execution_state_lock_);
  if (out_state)
    *out_state = thread_execution_state_;
  if (thread_execution_state_ == RUNNING) {
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    DCHECK(thread_execution_state_task_runner_);
  } else {
    DCHECK(!thread_execution_state_task_runner_);
  }

  return thread_execution_state_task_runner_;
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
  // This should be called only from the sampling thread as it has limited
  // accessibility.
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  return Thread::task_runner();
}

void StackSamplingProfiler::SamplingThread::FinishCollection(
    CollectionContext* collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  DCHECK_EQ(0u, active_collections_.count(collection->collection_id));

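  // The duration includes one extra sampling interval beyond the elapsed
  // time, presumably to account for the period nominally covered by the
  // final sample.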
  TimeDelta profile_duration = Time::Now() - collection->profile_start_time +
                               collection->params.sampling_interval;

  collection->profile_builder->OnProfileCompleted(
      profile_duration, collection->params.sampling_interval);

  // Signal that this collection is finished.
  collection->finished->Signal();

  ScheduleShutdownIfIdle();
}

void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  if (!active_collections_.empty())
    return;

  int add_events;
  {
    AutoLock lock(thread_execution_state_lock_);
    if (thread_execution_state_disable_idle_shutdown_for_testing_)
      return;
    add_events = thread_execution_state_add_events_;
  }

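  // Post the delayed shutdown with the current "add events" count. If another
  // collection is added before the task runs, the count will no longer match
  // and ShutdownTask() will abort rather than stop a thread that has new work.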
  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::ShutdownTask, Unretained(this), add_events),
      TimeDelta::FromSeconds(60));
}

void StackSamplingProfiler::SamplingThread::AddCollectionTask(
    std::unique_ptr<CollectionContext> collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  const int collection_id = collection->collection_id;
  const TimeDelta initial_delay = collection->params.initial_delay;

  active_collections_.insert(
      std::make_pair(collection_id, std::move(collection)));

  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
               collection_id),
      initial_delay);

  // Another increment of "add events" serves to invalidate any pending
  // shutdown tasks that may have been initiated between the Add() call and
  // this task running.
  {
    AutoLock lock(thread_execution_state_lock_);
    ++thread_execution_state_add_events_;
  }
}

void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);
  if (found == active_collections_.end())
    return;

  // Remove |collection| from |active_collections_|.
  std::unique_ptr<CollectionContext> collection = std::move(found->second);
  size_t count = active_collections_.erase(collection_id);
  DCHECK_EQ(1U, count);

  FinishCollection(collection.get());
}

void StackSamplingProfiler::SamplingThread::RecordSampleTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);

  // The task won't be found if it has been stopped.
  if (found == active_collections_.end())
    return;

  CollectionContext* collection = found->second.get();

  // If this is the first sample, the collection's timing state needs to be
  // initialized.
  if (collection->sample_count == 0) {
    collection->profile_start_time = Time::Now();
    collection->next_sample_time = Time::Now();
    collection->native_sampler->ProfileRecordingStarting();
  }

  // Record a single sample.
  collection->profile_builder->OnSampleCompleted(
      collection->native_sampler->RecordStackFrames(
          stack_buffer_.get(), collection->profile_builder.get()));

  // Schedule the next sample recording if there is one.
  if (++collection->sample_count < collection->params.samples_per_profile) {
    // This will keep a consistent average interval between samples but will
    // result in a constant series of acquisitions, thus nearly locking out the
    // target thread, if the interval is smaller than the time it takes to
    // actually acquire the sample. Anything sampling that quickly is going
    // to be a problem anyway so don't worry about it.
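    // For example, with a 10 ms sampling interval: if recording a sample took
    // 2 ms, the next task is posted with an ~8 ms delay; if recording took
    // longer than 10 ms, the std::max() below clamps the delay to zero and
    // the next sample is recorded immediately.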
    collection->next_sample_time += collection->params.sampling_interval;
    bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
        FROM_HERE,
        BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
                 collection_id),
        std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
    DCHECK(success);
    return;
  }

  // Take ownership of |collection| and remove it from the map.
  std::unique_ptr<CollectionContext> owned_collection =
      std::move(found->second);
  size_t count = active_collections_.erase(collection_id);
  DCHECK_EQ(1U, count);

  // All capturing has completed so finish the collection.
  FinishCollection(collection);
}

void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // Holding this lock ensures that any attempt to start another job will
  // get postponed until |thread_execution_state_| is updated, thus eliminating
  // the race in starting a new thread while the previous one is exiting.
  AutoLock lock(thread_execution_state_lock_);

  // If the current count of creation requests doesn't match the passed count,
  // then another collection has been added since this task was posted. Abort
  // shutdown.
  if (thread_execution_state_add_events_ != add_events)
    return;

  // There can be no new AddCollectionTasks at this point because creating
  // those always increments "add events". There may be other requests, like
  // Remove, but it's okay to schedule the thread to stop once they've been
  // executed (i.e. "soon").
  DCHECK(active_collections_.empty());
  StopSoon();

  // StopSoon will have set the owning sequence (again) so it must be detached
  // (again) in order for Stop/Start to be called (again) should more work
  // come in. Holding the |thread_execution_state_lock_| ensures the necessary
  // happens-after with regard to this detach and future Thread API calls.
  DetachFromSequence();

  // Set |thread_execution_state_| so the thread will be restarted when new
  // work comes in. Remove the |thread_execution_state_task_runner_| to avoid
  // confusion.
  thread_execution_state_ = EXITING;
  thread_execution_state_task_runner_ = nullptr;
  stack_buffer_.reset();
}

void StackSamplingProfiler::SamplingThread::CleanUp() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // There should be no collections remaining when the thread stops.
  DCHECK(active_collections_.empty());

  // Let the parent clean up.
  Thread::CleanUp();
}

// StackSamplingProfiler ------------------------------------------------------

// static
void StackSamplingProfiler::TestAPI::Reset() {
  SamplingThread::TestAPI::Reset();
}

// static
bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() {
  return SamplingThread::GetInstance()->IsRunning();
}

// static
void StackSamplingProfiler::TestAPI::DisableIdleShutdown() {
  SamplingThread::TestAPI::DisableIdleShutdown();
}

// static
void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
    bool simulate_intervening_start) {
  SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
}

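// A minimal usage sketch of the construct/Start()/Stop() flow implemented
// below. Illustrative only: MyProfileBuilder stands in for whatever
// ProfileBuilder implementation the caller supplies; it is not part of this
// file.
//
//   SamplingParams params;
//   params.initial_delay = TimeDelta::FromMilliseconds(0);
//   params.sampling_interval = TimeDelta::FromMilliseconds(10);
//   params.samples_per_profile = 300;
//
//   StackSamplingProfiler profiler(PlatformThread::CurrentId(), params,
//                                  std::make_unique<MyProfileBuilder>(),
//                                  nullptr /* test_delegate */);
//   profiler.Start();
//   // ... the target thread continues running while samples are taken ...
//   profiler.Stop();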
StackSamplingProfiler::StackSamplingProfiler(
    const SamplingParams& params,
    std::unique_ptr<ProfileBuilder> profile_builder,
    NativeStackSamplerTestDelegate* test_delegate)
    : StackSamplingProfiler(PlatformThread::CurrentId(),
                            params,
                            std::move(profile_builder),
                            test_delegate) {}

StackSamplingProfiler::StackSamplingProfiler(
    PlatformThreadId thread_id,
    const SamplingParams& params,
    std::unique_ptr<ProfileBuilder> profile_builder,
    NativeStackSamplerTestDelegate* test_delegate)
    : thread_id_(thread_id),
      params_(params),
      profile_builder_(std::move(profile_builder)),
      // The event starts "signaled" so code knows it's safe to start the
      // thread, and "manual" so that it can be waited upon in multiple places.
      profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
      profiler_id_(kNullProfilerId),
      test_delegate_(test_delegate) {
  DCHECK(profile_builder_);
}

StackSamplingProfiler::~StackSamplingProfiler() {
  // Stop() returns immediately but the shutdown runs asynchronously. There is
  // a non-zero probability that one more sample will be taken after this call
  // returns.
  Stop();

  // The behavior of sampling a thread that has exited is undefined and could
  // cause Bad Things(tm) to occur. The safety model provided by this class is
  // that an instance of this object is expected to live at least as long as
  // the thread it is sampling. However, because the sampling is performed
  // asynchronously by the SamplingThread, there is no way to guarantee this
  // is true without waiting for it to signal that it has finished.
  //
  // The wait time should, at most, be only as long as it takes to collect one
  // sample (~200us) or none at all if sampling has already completed.
  ThreadRestrictions::ScopedAllowWait allow_wait;
  profiling_inactive_.Wait();
}

void StackSamplingProfiler::Start() {
  // Multiple calls to Start() for a single StackSamplingProfiler object are
  // not allowed. If profile_builder_ is nullptr, then Start() has been called
  // already.
  DCHECK(profile_builder_);

  std::unique_ptr<NativeStackSampler> native_sampler =
      NativeStackSampler::Create(thread_id_, test_delegate_);

  if (!native_sampler)
    return;

  // The IsSignaled() check below requires that the WaitableEvent be manually
  // reset, to avoid signaling the event in IsSignaled() itself.
  static_assert(kResetPolicy == WaitableEvent::ResetPolicy::MANUAL,
                "The reset policy must be set to MANUAL");

  // If a previous profiling phase is still winding down, wait for it to
  // complete. We can't use task posting for this coordination because the
  // thread owning the profiler may not have a message loop.
  if (!profiling_inactive_.IsSignaled())
    profiling_inactive_.Wait();
  profiling_inactive_.Reset();

  DCHECK_EQ(kNullProfilerId, profiler_id_);
  profiler_id_ = SamplingThread::GetInstance()->Add(
      std::make_unique<SamplingThread::CollectionContext>(
          thread_id_, params_, &profiling_inactive_, std::move(native_sampler),
          std::move(profile_builder_)));
  DCHECK_NE(kNullProfilerId, profiler_id_);
}

void StackSamplingProfiler::Stop() {
  SamplingThread::GetInstance()->Remove(profiler_id_);
  profiler_id_ = kNullProfilerId;
}

// StackSamplingProfiler::Module/Sample/Frame global functions ----------------

bool operator==(const StackSamplingProfiler::Module& a,
                const StackSamplingProfiler::Module& b) {
  return a.base_address == b.base_address && a.id == b.id &&
         a.filename == b.filename;
}

bool operator==(const StackSamplingProfiler::Sample& a,
                const StackSamplingProfiler::Sample& b) {
  return a.process_milestones == b.process_milestones && a.frames == b.frames;
}

bool operator!=(const StackSamplingProfiler::Sample& a,
                const StackSamplingProfiler::Sample& b) {
  return !(a == b);
}

bool operator<(const StackSamplingProfiler::Sample& a,
               const StackSamplingProfiler::Sample& b) {
  if (a.process_milestones != b.process_milestones)
    return a.process_milestones < b.process_milestones;

  return a.frames < b.frames;
}

bool operator==(const StackSamplingProfiler::Frame& a,
                const StackSamplingProfiler::Frame& b) {
  return a.instruction_pointer == b.instruction_pointer &&
         a.module_index == b.module_index;
}

bool operator<(const StackSamplingProfiler::Frame& a,
               const StackSamplingProfiler::Frame& b) {
  if (a.module_index != b.module_index)
    return a.module_index < b.module_index;

  return a.instruction_pointer < b.instruction_pointer;
}

}  // namespace base