// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/sequence_manager_impl.h"

#include <array>
#include <atomic>
#include <optional>
#include <queue>
#include <string_view>
#include <vector>

#include "base/callback_list.h"
#include "base/compiler_specific.h"
#include "base/debug/crash_logging.h"
#include "base/debug/stack_trace.h"
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/functional/callback_helpers.h"
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/notreached.h"
#include "base/observer_list.h"
#include "base/rand_util.h"
#include "base/ranges/algorithm.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_time_observer.h"
#include "base/task/sequence_manager/thread_controller_impl.h"
#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
#include "base/task/sequence_manager/time_domain.h"
#include "base/task/sequence_manager/wake_up_queue.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/task/sequence_manager/work_queue_sets.h"
#include "base/task/task_features.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/base/attributes.h"

namespace base {
namespace sequence_manager {
namespace {

// Whether SequenceManagerImpl records crash keys. Enable via Finch when needed
// for an investigation. Disabled by default to avoid unnecessary overhead.
BASE_FEATURE(kRecordSequenceManagerCrashKeys,
             "RecordSequenceManagerCrashKeys",
             base::FEATURE_DISABLED_BY_DEFAULT);

ABSL_CONST_INIT thread_local internal::SequenceManagerImpl*
    thread_local_sequence_manager = nullptr;

class TracedBaseValue : public trace_event::ConvertableToTraceFormat {
 public:
  explicit TracedBaseValue(Value value) : value_(std::move(value)) {}
  ~TracedBaseValue() override = default;

  void AppendAsTraceFormat(std::string* out) const override {
    if (!value_.is_none()) {
      std::string tmp;
      JSONWriter::Write(value_, &tmp);
      *out += tmp;
    } else {
      *out += "{}";
    }
  }

 private:
  base::Value value_;
};

}  // namespace

std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread(
    SequenceManager::Settings settings) {
  return internal::SequenceManagerImpl::CreateOnCurrentThread(
      std::move(settings));
}

std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThreadWithPump(
    std::unique_ptr<MessagePump> message_pump,
    SequenceManager::Settings settings) {
  std::unique_ptr<SequenceManager> manager =
      internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
  manager->BindToMessagePump(std::move(message_pump));
  return manager;
}

std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
    SequenceManager::Settings settings) {
  return internal::SequenceManagerImpl::CreateUnbound(std::move(settings));
}
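
// Example wiring of the factory functions above (an illustrative sketch; the
// exact Settings fields and pump type depend on the embedder):
//
//   std::unique_ptr<SequenceManager> manager = CreateUnboundSequenceManager(
//       SequenceManager::Settings::Builder()
//           .SetMessagePumpType(MessagePumpType::DEFAULT)
//           .Build());
//   manager->BindToMessagePump(MessagePump::Create(MessagePumpType::DEFAULT));
//
// CreateSequenceManagerOnCurrentThreadWithPump() above performs the same two
// steps in one call.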

namespace internal {

std::unique_ptr<SequenceManagerImpl> CreateUnboundSequenceManagerImpl(
    PassKey<base::internal::SequenceManagerThreadDelegate>,
    SequenceManager::Settings settings) {
  return SequenceManagerImpl::CreateUnbound(std::move(settings));
}

using TimeRecordingPolicy =
    base::sequence_manager::TaskQueue::TaskTiming::TimeRecordingPolicy;

namespace {

constexpr TimeDelta kLongTaskTraceEventThreshold = Milliseconds(50);
// Proportion of tasks which will record thread time for metrics.
const double kTaskSamplingRateForRecordingCPUTime = 0.01;
// Proportion of SequenceManagers which will record thread time for each task,
// enabling advanced metrics.
const double kThreadSamplingRateForRecordingCPUTime = 0.0001;

void ReclaimMemoryFromQueue(internal::TaskQueueImpl* queue, LazyNow* lazy_now) {
  queue->ReclaimMemory(lazy_now->Now());
  // If the queue was shut down as a side-effect of reclaiming memory, |queue|
  // will still be valid but the work queues will have been removed by
  // TaskQueueImpl::UnregisterTaskQueue.
  if (queue->delayed_work_queue()) {
    queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
    queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
  }
}

SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings(
    bool randomised_sampling_enabled) {
  if (!randomised_sampling_enabled)
    return SequenceManager::MetricRecordingSettings(0);
  bool records_cpu_time_for_each_task =
      base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
  return SequenceManager::MetricRecordingSettings(
      records_cpu_time_for_each_task ? 1
                                     : kTaskSamplingRateForRecordingCPUTime);
}
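
// For the default rates above this means that, roughly, 1 in 10,000 sequence
// managers records thread time for every task, while the rest record it for
// about 1 in 100 tasks.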

// Writes |address| in hexadecimal ("0x11223344") form starting from |output|
// and moving backwards in memory. Returns a pointer to the first digit of the
// result. Does *not* NUL-terminate the number.
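// For example (illustrative): with `char buf[8];`, calling
// PrependHexAddress(&buf[7], reinterpret_cast<const void*>(0x1F)) writes
// "0x1F" into buf[4]..buf[7] and returns &buf[4].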
#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
char* PrependHexAddress(char* output, const void* address) {
  uintptr_t value = reinterpret_cast<uintptr_t>(address);
  static const char kHexChars[] = "0123456789ABCDEF";
  do {
    *output-- = kHexChars[value % 16];
    value /= 16;
  } while (value);
  *output-- = 'x';
  *output = '0';
  return output;
}
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)

// Atomic to avoid TSAN warnings when a test tries to access the value before
// the feature list is available.
std::atomic_bool g_record_crash_keys = false;

#if BUILDFLAG(IS_WIN)
bool g_explicit_high_resolution_timer_win = true;
#endif  // BUILDFLAG(IS_WIN)

}  // namespace

// static
SequenceManagerImpl* SequenceManagerImpl::GetCurrent() {
  // Work around a false-positive MSAN use-of-uninitialized-value on
  // thread_local storage for loaded libraries:
  // https://github.com/google/sanitizers/issues/1265
  MSAN_UNPOISON(&thread_local_sequence_manager, sizeof(SequenceManagerImpl*));

  return thread_local_sequence_manager;
}

SequenceManagerImpl::SequenceManagerImpl(
    std::unique_ptr<internal::ThreadController> controller,
    SequenceManager::Settings settings)
    : associated_thread_(controller->GetAssociatedThread()),
      controller_(std::move(controller)),
      settings_(std::move(settings)),
      metric_recording_settings_(InitializeMetricRecordingSettings(
          settings_.randomised_sampling_enabled)),
      add_queue_time_to_tasks_(settings_.add_queue_time_to_tasks),
      empty_queues_to_reload_(associated_thread_),
      main_thread_only_(this, associated_thread_, settings_, settings_.clock),
      clock_(settings_.clock) {
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
  main_thread_only().selector.SetTaskQueueSelectorObserver(this);

  main_thread_only().next_time_to_reclaim_memory =
      main_thread_clock()->NowTicks() + kReclaimMemoryInterval;

  controller_->SetSequencedTaskSource(this);
}

SequenceManagerImpl::~SequenceManagerImpl() {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);

#if BUILDFLAG(IS_IOS)
  if (settings_.message_loop_type == MessagePumpType::UI &&
      associated_thread_->IsBound()) {
    controller_->DetachFromMessagePump();
  }
#endif

  // Make sure no Task is currently running, given that RunLoop does not
  // support the Delegate being destroyed from a Task and
  // ThreadControllerWithMessagePumpImpl does not support being destroyed from
  // a Task. If we are using a ThreadControllerImpl (i.e. no pump), destruction
  // is fine.
  DCHECK(!controller_->GetBoundMessagePump() ||
         main_thread_only().task_execution_stack.empty());

  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    main_thread_only().selector.RemoveQueue(queue);
    queue->UnregisterTaskQueue();
  }

  // TODO(altimin): restore default task runner automatically when
  // ThreadController is destroyed.
  controller_->RestoreDefaultTaskRunner();

  main_thread_only().active_queues.clear();
  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);

  // In the case of an early startup exit, or in some tests, a NestingObserver
  // may not have been registered.
  if (main_thread_only().nesting_observer_registered_)
    controller_->RemoveNestingObserver(this);

  // Let interested parties have one last shot at accessing this.
  for (auto& observer : main_thread_only().destruction_observers)
    observer.WillDestroyCurrentMessageLoop();

  // OK, now make it so that no one can find us.
  if (GetMessagePump()) {
    DCHECK_EQ(this, GetCurrent());
    thread_local_sequence_manager = nullptr;
  }
}

SequenceManagerImpl::MainThreadOnly::MainThreadOnly(
    SequenceManagerImpl* sequence_manager,
    const scoped_refptr<AssociatedThreadId>& associated_thread,
    const SequenceManager::Settings& settings,
    const base::TickClock* clock)
    : selector(associated_thread, settings),
      default_clock(clock),
      time_domain(nullptr),
      wake_up_queue(std::make_unique<DefaultWakeUpQueue>(associated_thread,
                                                         sequence_manager)),
      non_waking_wake_up_queue(
          std::make_unique<NonWakingWakeUpQueue>(associated_thread)) {
  if (settings.randomised_sampling_enabled) {
    metrics_subsampler = base::MetricsSubSampler();
  }
}

SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;

// static
std::unique_ptr<ThreadControllerImpl>
SequenceManagerImpl::CreateThreadControllerImplForCurrentThread(
    const TickClock* clock) {
  return ThreadControllerImpl::Create(GetCurrent(), clock);
}

// static
std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateOnCurrentThread(
    SequenceManager::Settings settings) {
  auto thread_controller =
      CreateThreadControllerImplForCurrentThread(settings.clock);
  std::unique_ptr<SequenceManagerImpl> manager(new SequenceManagerImpl(
      std::move(thread_controller), std::move(settings)));
  manager->BindToCurrentThread();
  return manager;
}

// static
std::unique_ptr<SequenceManagerImpl> SequenceManagerImpl::CreateUnbound(
    SequenceManager::Settings settings) {
  auto thread_controller =
      ThreadControllerWithMessagePumpImpl::CreateUnbound(settings);
  return WrapUnique(new SequenceManagerImpl(std::move(thread_controller),
                                            std::move(settings)));
}

// static
void SequenceManagerImpl::InitializeFeatures() {
  TaskQueueImpl::InitializeFeatures();
  MessagePump::InitializeFeatures();
  ThreadControllerWithMessagePumpImpl::InitializeFeatures();
#if BUILDFLAG(IS_WIN)
  g_explicit_high_resolution_timer_win =
      FeatureList::IsEnabled(kExplicitHighResolutionTimerWin);
#endif  // BUILDFLAG(IS_WIN)

  g_record_crash_keys.store(
      FeatureList::IsEnabled(kRecordSequenceManagerCrashKeys),
      std::memory_order_relaxed);
  TaskQueueSelector::InitializeFeatures();
}

void SequenceManagerImpl::BindToMessagePump(std::unique_ptr<MessagePump> pump) {
  controller_->BindToCurrentThread(std::move(pump));
  CompleteInitializationOnBoundThread();

  // On Android attach to the native loop when there is one.
#if BUILDFLAG(IS_ANDROID)
  if (settings_.message_loop_type == MessagePumpType::UI ||
      settings_.message_loop_type == MessagePumpType::JAVA) {
    controller_->AttachToMessagePump();
  }
#endif

  // On iOS attach to the native loop when there is one.
#if BUILDFLAG(IS_IOS)
  if (settings_.message_loop_type == MessagePumpType::UI) {
    controller_->AttachToMessagePump();
  }
#endif
}

void SequenceManagerImpl::BindToCurrentThread() {
  associated_thread_->BindToCurrentThread();
  CompleteInitializationOnBoundThread();
}

scoped_refptr<SequencedTaskRunner>
SequenceManagerImpl::GetTaskRunnerForCurrentTask() {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (main_thread_only().task_execution_stack.empty())
    return nullptr;
  return main_thread_only()
      .task_execution_stack.back()
      .pending_task.task_runner;
}

void SequenceManagerImpl::CompleteInitializationOnBoundThread() {
  controller_->AddNestingObserver(this);
  main_thread_only().nesting_observer_registered_ = true;
  if (GetMessagePump()) {
    DCHECK(!GetCurrent())
        << "Can't register a second SequenceManagerImpl on the same thread.";
    thread_local_sequence_manager = this;
  }
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    queue->CompleteInitializationOnBoundThread();
  }
}

void SequenceManagerImpl::SetTimeDomain(TimeDomain* time_domain) {
  DCHECK(!main_thread_only().time_domain);
  DCHECK(time_domain);
  time_domain->OnAssignedToSequenceManager(this);
  controller_->SetTickClock(time_domain);
  main_thread_only().time_domain = time_domain;
  clock_.store(time_domain, std::memory_order_release);
}

void SequenceManagerImpl::ResetTimeDomain() {
  controller_->SetTickClock(main_thread_only().default_clock);
  clock_.store(main_thread_only().default_clock.get(),
               std::memory_order_release);
  main_thread_only().time_domain = nullptr;
}

std::unique_ptr<internal::TaskQueueImpl>
SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  std::unique_ptr<internal::TaskQueueImpl> task_queue =
      std::make_unique<internal::TaskQueueImpl>(
          this,
          spec.non_waking ? main_thread_only().non_waking_wake_up_queue.get()
                          : main_thread_only().wake_up_queue.get(),
          spec);
  main_thread_only().active_queues.insert(task_queue.get());
  main_thread_only().selector.AddQueue(
      task_queue.get(), settings().priority_settings.default_priority());
  return task_queue;
}

void SequenceManagerImpl::SetAddQueueTimeToTasks(bool enable) {
  base::subtle::NoBarrier_Store(&add_queue_time_to_tasks_, enable ? 1 : 0);
}

bool SequenceManagerImpl::GetAddQueueTimeToTasks() {
  return base::subtle::NoBarrier_Load(&add_queue_time_to_tasks_);
}

void SequenceManagerImpl::SetObserver(Observer* observer) {
  main_thread_only().observer = observer;
}

void SequenceManagerImpl::UnregisterTaskQueueImpl(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  main_thread_only().selector.RemoveQueue(task_queue.get());

  // After UnregisterTaskQueue returns no new tasks can be posted.
  // It's important to call it first to avoid a race condition between removing
  // the task queue from various lists here and adding it to the same lists
  // when posting a task.
  task_queue->UnregisterTaskQueue();

  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can
  // prevent it from being freed while any of our structures hold a raw
  // pointer to it.
  main_thread_only().active_queues.erase(task_queue.get());
  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
}

AtomicFlagSet::AtomicFlag
SequenceManagerImpl::GetFlagToRequestReloadForEmptyQueue(
    TaskQueueImpl* task_queue) {
  return empty_queues_to_reload_.AddFlag(BindRepeating(
      &TaskQueueImpl::ReloadEmptyImmediateWorkQueue, Unretained(task_queue)));
}

void SequenceManagerImpl::ReloadEmptyWorkQueues() {
  work_tracker_.WillReloadImmediateWorkQueues();

  // There are two cases where a queue needs reloading. First, it might be
  // completely empty and we've just posted a task (this method handles that
  // case). Second, the work queue might become empty when calling
  // WorkQueue::TakeTaskFromWorkQueue (handled there).
  //
  // Invokes callbacks created by GetFlagToRequestReloadForEmptyQueue above.
  empty_queues_to_reload_.RunActiveCallbacks();
}

void SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::MoveReadyDelayedTasksToWorkQueues");

  EnqueueOrder delayed_task_group_enqueue_order = GetNextSequenceNumber();
  main_thread_only().wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
      lazy_now, delayed_task_group_enqueue_order);
  main_thread_only()
      .non_waking_wake_up_queue->MoveReadyDelayedTasksToWorkQueues(
          lazy_now, delayed_task_group_enqueue_order);
}

void SequenceManagerImpl::OnBeginNestedRunLoop() {
  main_thread_only().nesting_depth++;
  if (main_thread_only().observer)
    main_thread_only().observer->OnBeginNestedRunLoop();
}

void SequenceManagerImpl::OnExitNestedRunLoop() {
  main_thread_only().nesting_depth--;
  DCHECK_GE(main_thread_only().nesting_depth, 0);
  if (main_thread_only().nesting_depth == 0) {
    // While we were nested some non-nestable tasks may have been deferred.
    // We push them back onto the *front* of their original work queues;
    // that's why we iterate |non_nestable_task_queue| in LIFO order (we want
    // |non_nestable_task.front()| to be the last task pushed at the front of
    // |task_queue|).
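    // For example (assuming both tasks came from the same queue): if task A
    // was deferred and then task B, B is requeued at the front first, then A
    // is pushed in front of B, restoring the original A-before-B order.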
    LazyNow exited_nested_now(main_thread_clock());
    while (!main_thread_only().non_nestable_task_queue.empty()) {
      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
          main_thread_only().non_nestable_task_queue.back();
      if (!non_nestable_task.task.queue_time.is_null()) {
        // Adjust the deferred tasks' queue time to now so that intentionally
        // deferred tasks are not unfairly considered as having been stuck in
        // the queue for a while. Note: this does not affect task ordering as
        // |enqueue_order| is untouched and deferred tasks will still be pushed
        // back to the front of the queue.
        non_nestable_task.task.queue_time = exited_nested_now.Now();
      }
      auto* const task_queue = non_nestable_task.task_queue;
      task_queue->RequeueDeferredNonNestableTask(std::move(non_nestable_task));
      main_thread_only().non_nestable_task_queue.pop_back();
    }
  }
  if (main_thread_only().observer)
    main_thread_only().observer->OnExitNestedRunLoop();
}

void SequenceManagerImpl::ScheduleWork() {
  controller_->ScheduleWork();
}

void SequenceManagerImpl::SetNextWakeUp(LazyNow* lazy_now,
                                        std::optional<WakeUp> wake_up) {
  auto next_wake_up = AdjustWakeUp(wake_up, lazy_now);
  if (next_wake_up && next_wake_up->is_immediate()) {
    ScheduleWork();
  } else {
    controller_->SetNextDelayedDoWork(lazy_now, next_wake_up);
  }
}

void SequenceManagerImpl::MaybeEmitTaskDetails(
    perfetto::EventContext& ctx,
    const SequencedTaskSource::SelectedTask& selected_task) const {
#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Other parameters are included only when the "scheduler" category is
  // enabled.
  const uint8_t* scheduler_category_enabled =
      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("scheduler");

  if (!*scheduler_category_enabled)
    return;
  auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
  auto* sequence_manager_task = event->set_sequence_manager_task();
  sequence_manager_task->set_priority(
      settings().priority_settings.TaskPriorityToProto(selected_task.priority));
  sequence_manager_task->set_queue_name(selected_task.task_queue_name);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

void SequenceManagerImpl::SetRunTaskSynchronouslyAllowed(
    bool can_run_tasks_synchronously) {
  work_tracker_.SetRunTaskSynchronouslyAllowed(can_run_tasks_synchronously);
}

std::optional<SequenceManagerImpl::SelectedTask>
SequenceManagerImpl::SelectNextTask(LazyNow& lazy_now,
                                    SelectTaskOption option) {
  std::optional<SelectedTask> selected_task =
      SelectNextTaskImpl(lazy_now, option);

  if (selected_task.has_value()) {
    work_tracker_.AssertHasWork();
  }

  return selected_task;
}

#if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
void SequenceManagerImpl::LogTaskDebugInfo(
    const WorkQueue* selected_work_queue) const {
  const Task* task = selected_work_queue->GetFrontTask();
  switch (settings_.task_execution_logging) {
    case Settings::TaskLogging::kNone:
      break;

    case Settings::TaskLogging::kEnabled:
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();
      break;

    case Settings::TaskLogging::kEnabledWithBacktrace: {
      std::array<const void*, PendingTask::kTaskBacktraceLength + 1> task_trace;
      task_trace[0] = task->posted_from.program_counter();
      ranges::copy(task->task_backtrace, task_trace.begin() + 1);
      size_t length = 0;
      while (length < task_trace.size() && task_trace[length])
        ++length;
      if (length == 0)
        break;
      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << debug::StackTrace(task_trace.data(), length);
      break;
    }

    case Settings::TaskLogging::kReorderedOnly: {
      std::vector<const Task*> skipped_tasks;
      main_thread_only().selector.CollectSkippedOverLowerPriorityTasks(
          selected_work_queue, &skipped_tasks);

      if (skipped_tasks.empty())
        break;

      LOG(INFO) << "#" << static_cast<uint64_t>(task->enqueue_order()) << " "
                << selected_work_queue->task_queue()->GetName()
                << (task->cross_thread_ ? " Run crossthread " : " Run ")
                << task->posted_from.ToString();

      for (const Task* skipped_task : skipped_tasks) {
        LOG(INFO) << "# (skipped over) "
                  << static_cast<uint64_t>(skipped_task->enqueue_order()) << " "
                  << skipped_task->posted_from.ToString();
      }
    }
  }
}
#endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)

std::optional<SequenceManagerImpl::SelectedTask>
SequenceManagerImpl::SelectNextTaskImpl(LazyNow& lazy_now,
                                        SelectTaskOption option) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::SelectNextTask");

  ReloadEmptyWorkQueues();
  MoveReadyDelayedTasksToWorkQueues(&lazy_now);

  // If we sampled now, check if it's time to reclaim memory next time we go
  // idle.
  if (lazy_now.has_value() &&
      lazy_now.Now() >= main_thread_only().next_time_to_reclaim_memory) {
    main_thread_only().memory_reclaim_scheduled = true;
  }

  while (true) {
    internal::WorkQueue* work_queue =
        main_thread_only().selector.SelectWorkQueueToService(option);
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
        this,
        AsValueWithSelectorResultForTracing(work_queue,
                                            /* force_verbose */ false));

    if (!work_queue)
      return std::nullopt;

    // If the head task was canceled, remove it and run the selector again.
    if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
      continue;

    if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
                     Nestable::kNonNestable &&
                 main_thread_only().nesting_depth > 0)) {
      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed
      // so the additional delay should not be a problem.
      // Note: because we don't delete queues while nested, it's perfectly OK
      // to store the raw pointer for |queue| here.
      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
          work_queue->queue_type()};
      main_thread_only().non_nestable_task_queue.push_back(
          std::move(deferred_task));
      continue;
    }

#if DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)
    LogTaskDebugInfo(work_queue);
#endif  // DCHECK_IS_ON() && !BUILDFLAG(IS_NACL)

    main_thread_only().task_execution_stack.emplace_back(
        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
        InitializeTaskTiming(work_queue->task_queue()));

    ExecutingTask& executing_task =
        *main_thread_only().task_execution_stack.rbegin();
    NotifyWillProcessTask(&executing_task, &lazy_now);

    // Maybe invalidate the delayed task handle. If it was already invalidated,
    // don't run this task.
    if (!executing_task.pending_task.WillRunTask()) {
      executing_task.pending_task.task = DoNothing();
    }

    return SelectedTask(
        executing_task.pending_task,
        executing_task.task_queue->task_execution_trace_logger(),
        executing_task.priority, executing_task.task_queue_name);
  }
}

void SequenceManagerImpl::DidRunTask(LazyNow& lazy_now) {
  work_tracker_.AssertHasWork();

  ExecutingTask& executing_task =
      *main_thread_only().task_execution_stack.rbegin();

  NotifyDidProcessTask(&executing_task, &lazy_now);
  main_thread_only().task_execution_stack.pop_back();

  if (main_thread_only().nesting_depth == 0)
    CleanUpQueues();
}

void SequenceManagerImpl::RemoveAllCanceledDelayedTasksFromFront(
    LazyNow* lazy_now) {
  main_thread_only().wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
      lazy_now);
  main_thread_only()
      .non_waking_wake_up_queue->RemoveAllCanceledDelayedTasksFromFront(
          lazy_now);
}

std::optional<WakeUp> SequenceManagerImpl::GetPendingWakeUp(
    LazyNow* lazy_now,
    SelectTaskOption option) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  RemoveAllCanceledDelayedTasksFromFront(lazy_now);

  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
    // If the selector has non-empty queues we trivially know there is
    // immediate work to be done. However we may want to yield to native work
    // if it is more important.
    return WakeUp{};
  }

  // There may be some incoming immediate work which we haven't accounted for.
  // NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
  // do this always.
  ReloadEmptyWorkQueues();

  if (main_thread_only().selector.GetHighestPendingPriority(option)) {
    return WakeUp{};
  }

  // Otherwise we need to find the shortest delay, if any. NB we don't need to
  // call MoveReadyDelayedTasksToWorkQueues because it's assumed
  // DelayTillNextTask will return TimeDelta() if the delayed task is due to
  // run now.
  return AdjustWakeUp(GetNextDelayedWakeUpWithOption(option), lazy_now);
}

std::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUp() const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  return main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
}

std::optional<WakeUp> SequenceManagerImpl::GetNextDelayedWakeUpWithOption(
    SelectTaskOption option) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);

  if (option == SelectTaskOption::kSkipDelayedTask)
    return std::nullopt;
  return GetNextDelayedWakeUp();
}

std::optional<WakeUp> SequenceManagerImpl::AdjustWakeUp(
    std::optional<WakeUp> wake_up,
    LazyNow* lazy_now) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (!wake_up)
    return std::nullopt;
  // Overdue work needs to be run immediately.
  if (lazy_now->Now() >= wake_up->earliest_time())
    return WakeUp{};
  // If |time_domain| is present, we don't want an actual OS level delayed wake
  // up scheduled, so pretend we have no more work. This will result in
  // appearing idle and |time_domain| will decide what to do in
  // MaybeFastForwardToWakeUp().
  if (main_thread_only().time_domain)
    return std::nullopt;
  return *wake_up;
}

void SequenceManagerImpl::MaybeAddLeewayToTask(Task& task) const {
  if (!main_thread_only().time_domain) {
    task.leeway = MessagePump::GetLeewayForCurrentThread();
  }
}

// TODO(crbug/1267874): Rename once ExplicitHighResolutionTimerWin experiment
// is shipped.
bool SequenceManagerImpl::HasPendingHighResolutionTasks() {
  // Only consider high-res tasks in the |wake_up_queue| (ignore the
  // |non_waking_wake_up_queue|).
#if BUILDFLAG(IS_WIN)
  if (g_explicit_high_resolution_timer_win) {
    std::optional<WakeUp> wake_up =
        main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
    if (!wake_up)
      return false;
    // Under the kExplicitHighResolutionTimerWin experiment, rely on leeway
    // being larger than the minimum time of a low resolution timer (16ms).
    // This way, we don't need to activate the high resolution timer for
    // precise tasks that will run in more than 16ms if there are non-precise
    // tasks in front of them.
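    // For example, a wake-up scheduled with subtle::DelayPolicy::kPrecise
    // keeps the high resolution timer active (see the return below), while
    // kFlexibleNoSooner and kFlexiblePreferEarly wake-ups do not.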
    DCHECK_GE(MessagePump::GetLeewayIgnoringThreadOverride(),
              Milliseconds(Time::kMinLowResolutionThresholdMs));
    return wake_up->delay_policy == subtle::DelayPolicy::kPrecise;
  }
#endif  // BUILDFLAG(IS_WIN)
  return main_thread_only().wake_up_queue->has_pending_high_resolution_tasks();
}

void SequenceManagerImpl::OnBeginWork() {
  work_tracker_.OnBeginWork();
}

bool SequenceManagerImpl::OnIdle() {
  bool have_work_to_do = false;
  if (main_thread_only().time_domain) {
    auto wakeup = main_thread_only().wake_up_queue->GetNextDelayedWakeUp();
    have_work_to_do = main_thread_only().time_domain->MaybeFastForwardToWakeUp(
        wakeup, controller_->ShouldQuitRunLoopWhenIdle());
  }
  if (!have_work_to_do) {
    MaybeReclaimMemory();
    main_thread_only().on_next_idle_callbacks.Notify();
    if (main_thread_only().task_execution_stack.empty()) {
      work_tracker_.OnIdle();
    }
  }
  return have_work_to_do;
}

void SequenceManagerImpl::WillRequestReloadImmediateWorkQueue() {
  work_tracker_.WillRequestReloadImmediateWorkQueue();
}

SyncWorkAuthorization SequenceManagerImpl::TryAcquireSyncWorkAuthorization() {
  return work_tracker_.TryAcquireSyncWorkAuthorization();
}

void SequenceManagerImpl::WillQueueTask(Task* pending_task) {
  controller_->WillQueueTask(pending_task);
}

TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
    internal::TaskQueueImpl* task_queue) {
  bool records_wall_time =
      ShouldRecordTaskTiming(task_queue) == TimeRecordingPolicy::DoRecord;
  bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
  return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
}

TimeRecordingPolicy SequenceManagerImpl::ShouldRecordTaskTiming(
    const internal::TaskQueueImpl* task_queue) {
  if (task_queue->RequiresTaskTiming())
    return TimeRecordingPolicy::DoRecord;
  if (main_thread_only().nesting_depth == 0 &&
      !main_thread_only().task_time_observers.empty()) {
    return TimeRecordingPolicy::DoRecord;
  }
  return TimeRecordingPolicy::DoNotRecord;
}

void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
                                                LazyNow* time_before_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyWillProcessTaskObservers");

  if (g_record_crash_keys.load(std::memory_order_relaxed)) {
    RecordCrashKeys(executing_task->pending_task);
  }

  if (executing_task->task_queue->GetQuiescenceMonitored())
    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;

  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  if (recording_policy == TimeRecordingPolicy::DoRecord)
    executing_task->task_timing.RecordTaskStart(time_before_task);

  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  const bool was_blocked_or_low_priority =
      executing_task->task_queue->WasBlockedOrLowPriority(
          executing_task->pending_task.enqueue_order());

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers) {
      observer.WillProcessTask(executing_task->pending_task,
                               was_blocked_or_low_priority);
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyWillProcessTask");
    executing_task->task_queue->NotifyWillProcessTask(
        executing_task->pending_task, was_blocked_or_low_priority);
  }

  if (recording_policy != TimeRecordingPolicy::DoRecord)
    return;

  if (main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers)
      observer.WillProcessTask(executing_task->task_timing.start_time());
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskStarted");
    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
                                              executing_task->task_timing);
  }
}

void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
                                               LazyNow* time_after_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyDidProcessTaskObservers");
  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  TaskQueue::TaskTiming& task_timing = executing_task->task_timing;

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskCompleted");
    if (task_timing.has_wall_time()) {
      executing_task->task_queue->OnTaskCompleted(
          executing_task->pending_task, &task_timing, time_after_task);
    }
  }

  bool has_valid_start =
      task_timing.state() != TaskQueue::TaskTiming::State::NotStarted;
  TimeRecordingPolicy recording_policy =
      ShouldRecordTaskTiming(executing_task->task_queue);
  // Record end time ASAP to avoid bias due to the overhead of observers.
  if (recording_policy == TimeRecordingPolicy::DoRecord && has_valid_start) {
    task_timing.RecordTaskEnd(time_after_task);
  }

  if (has_valid_start && task_timing.has_wall_time() &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers) {
      observer.DidProcessTask(task_timing.start_time(),
                              task_timing.end_time());
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.DidProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyDidProcessTask");
    executing_task->task_queue->NotifyDidProcessTask(
        executing_task->pending_task);
  }

  // TODO(altimin): Move this back to blink.
  if (task_timing.has_wall_time() &&
      recording_policy == TimeRecordingPolicy::DoRecord &&
      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
                         "duration", task_timing.wall_duration().InSecondsF());
  }
}

void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_GE(work_batch_size, 1);
  controller_->SetWorkBatchSize(work_batch_size);
}

void SequenceManagerImpl::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.AddObserver(task_observer);
}

void SequenceManagerImpl::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_observers.RemoveObserver(task_observer);
}

void SequenceManagerImpl::AddTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.AddObserver(task_time_observer);
}

void SequenceManagerImpl::RemoveTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
}

bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
  bool task_was_run =
      main_thread_only().task_was_run_on_quiescence_monitored_queue;
  main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
  return !task_was_run;
}

EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
  return enqueue_order_generator_.GenerateNext();
}

std::unique_ptr<trace_event::ConvertableToTraceFormat>
SequenceManagerImpl::AsValueWithSelectorResultForTracing(
    internal::WorkQueue* selected_work_queue,
    bool force_verbose) const {
  return std::make_unique<TracedBaseValue>(
      Value(AsValueWithSelectorResult(selected_work_queue, force_verbose)));
}

Value::Dict SequenceManagerImpl::AsValueWithSelectorResult(
    internal::WorkQueue* selected_work_queue,
    bool force_verbose) const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  TimeTicks now = NowTicks();
  Value::Dict state;
  Value::List active_queues;
  for (internal::TaskQueueImpl* const queue :
       main_thread_only().active_queues) {
    active_queues.Append(queue->AsValue(now, force_verbose));
  }
  state.Set("active_queues", std::move(active_queues));
  Value::List queues_to_delete;
  for (const auto& pair : main_thread_only().queues_to_delete)
    queues_to_delete.Append(pair.first->AsValue(now, force_verbose));
  state.Set("queues_to_delete", std::move(queues_to_delete));
  state.Set("selector", main_thread_only().selector.AsValue());
  if (selected_work_queue) {
    state.Set("selected_queue", selected_work_queue->task_queue()->GetName());
    state.Set("work_queue_name", selected_work_queue->name());
  }
  state.Set("time_domain", main_thread_only().time_domain
                               ? main_thread_only().time_domain->AsValue()
                               : Value::Dict());
  state.Set("wake_up_queue", main_thread_only().wake_up_queue->AsValue(now));
  state.Set("non_waking_wake_up_queue",
            main_thread_only().non_waking_wake_up_queue->AsValue(now));
  return state;
}

void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK(queue->IsQueueEnabled());
  // Only schedule DoWork if there's something to do.
  if (queue->HasTaskToRunImmediatelyOrReadyDelayedTask() &&
      !queue->BlockedByFence())
    ScheduleWork();
}

void SequenceManagerImpl::OnWorkAvailable() {
  work_tracker_.OnBeginWork();
}

void SequenceManagerImpl::MaybeReclaimMemory() {
  if (!main_thread_only().memory_reclaim_scheduled)
    return;

  TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::MaybeReclaimMemory");
  ReclaimMemory();

  // To avoid performance regressions we only want to do this every so often.
  main_thread_only().next_time_to_reclaim_memory =
      NowTicks() + kReclaimMemoryInterval;
  main_thread_only().memory_reclaim_scheduled = false;
}

void SequenceManagerImpl::ReclaimMemory() {
  LazyNow lazy_now(main_thread_clock());
  for (auto it = main_thread_only().active_queues.begin();
       it != main_thread_only().active_queues.end();) {
    auto* const queue = (*it++).get();
    ReclaimMemoryFromQueue(queue, &lazy_now);
  }
}

void SequenceManagerImpl::CleanUpQueues() {
  main_thread_only().queues_to_delete.clear();
}

WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}

void SequenceManagerImpl::SetDefaultTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  controller_->SetDefaultTaskRunner(task_runner);
}

const TickClock* SequenceManagerImpl::GetTickClock() const {
  return any_thread_clock();
}

TimeTicks SequenceManagerImpl::NowTicks() const {
  return any_thread_clock()->NowTicks();
}

bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
  DCHECK(ThreadTicks::IsSupported() ||
         !metric_recording_settings_.records_cpu_time_for_some_tasks());
  return metric_recording_settings_.records_cpu_time_for_some_tasks() &&
         main_thread_only().metrics_subsampler->ShouldSample(
             metric_recording_settings_
                 .task_sampling_rate_for_recording_cpu_time);
}

const SequenceManager::MetricRecordingSettings&
SequenceManagerImpl::GetMetricRecordingSettings() const {
  return metric_recording_settings_;
}

void SequenceManagerImpl::SetTaskExecutionAllowedInNativeNestedLoop(
    bool allowed) {
  controller_->SetTaskExecutionAllowedInNativeNestedLoop(allowed);
}

bool SequenceManagerImpl::IsTaskExecutionAllowedInNativeNestedLoop() const {
  return controller_->IsTaskExecutionAllowed();
}

#if BUILDFLAG(IS_IOS)
void SequenceManagerImpl::AttachToMessagePump() {
  return controller_->AttachToMessagePump();
}
#endif

bool SequenceManagerImpl::IsIdleForTesting() {
  ReloadEmptyWorkQueues();

  // Make sure that canceled tasks don't affect the return value.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    queue->delayed_work_queue()->RemoveAllCanceledTasksFromFront();
    queue->immediate_work_queue()->RemoveAllCanceledTasksFromFront();
  }

  return !main_thread_only().selector.GetHighestPendingPriority().has_value();
}

void SequenceManagerImpl::EnableMessagePumpTimeKeeperMetrics(
    const char* thread_name) {
  controller_->EnableMessagePumpTimeKeeperMetrics(thread_name);
}

size_t SequenceManagerImpl::GetPendingTaskCountForTesting() const {
  size_t total = 0;
  for (internal::TaskQueueImpl* task_queue : main_thread_only().active_queues) {
    total += task_queue->GetNumberOfPendingTasks();
  }
  return total;
}

TaskQueue::Handle SequenceManagerImpl::CreateTaskQueue(
    const TaskQueue::Spec& spec) {
  return TaskQueue::Handle(CreateTaskQueueImpl(spec));
}
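
// Example use of CreateTaskQueue (an illustrative sketch; the QueueName value
// and the posted callback are hypothetical):
//
//   TaskQueue::Handle queue = sequence_manager->CreateTaskQueue(
//       TaskQueue::Spec(QueueName::TEST_TQ));
//   queue->task_runner()->PostTask(FROM_HERE, BindOnce(&DoSomething));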

std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
  Value::Dict value =
      AsValueWithSelectorResult(nullptr, /* force_verbose */ true);
  std::string result;
  JSONWriter::Write(value, &result);
  return result;
}

void SequenceManagerImpl::PrioritizeYieldingToNative(
    base::TimeTicks prioritize_until) {
  controller_->PrioritizeYieldingToNative(prioritize_until);
}

void SequenceManagerImpl::AddDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.AddObserver(destruction_observer);
}

void SequenceManagerImpl::RemoveDestructionObserver(
    CurrentThread::DestructionObserver* destruction_observer) {
  main_thread_only().destruction_observers.RemoveObserver(
      destruction_observer);
}

CallbackListSubscription SequenceManagerImpl::RegisterOnNextIdleCallback(
    OnceClosure on_next_idle_callback) {
  return main_thread_only().on_next_idle_callbacks.Add(
      std::move(on_next_idle_callback));
}

void SequenceManagerImpl::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  controller_->SetDefaultTaskRunner(task_runner);
}

scoped_refptr<SingleThreadTaskRunner> SequenceManagerImpl::GetTaskRunner() {
  return controller_->GetDefaultTaskRunner();
}

bool SequenceManagerImpl::IsBoundToCurrentThread() const {
  return associated_thread_->IsBoundToCurrentThread();
}

MessagePump* SequenceManagerImpl::GetMessagePump() const {
  return controller_->GetBoundMessagePump();
}

bool SequenceManagerImpl::IsType(MessagePumpType type) const {
  return settings_.message_loop_type == type;
}

void SequenceManagerImpl::EnableCrashKeys(const char* async_stack_crash_key) {
  DCHECK(!main_thread_only().async_stack_crash_key);
#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
  main_thread_only().async_stack_crash_key = debug::AllocateCrashKeyString(
      async_stack_crash_key, debug::CrashKeySize::Size64);
  static_assert(sizeof(main_thread_only().async_stack_buffer) ==
                    static_cast<size_t>(debug::CrashKeySize::Size64),
                "Async stack buffer size must match crash key size.");
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
}

void SequenceManagerImpl::RecordCrashKeys(const PendingTask& pending_task) {
#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
  // SetCrashKeyString is a no-op even if the crash key is null, but we'd still
  // have to construct the std::string_view that is passed in.
  if (!main_thread_only().async_stack_crash_key)
    return;

  // Write the async stack trace onto a crash key as whitespace-delimited hex
  // addresses. These will be symbolized by the crash reporting system. With
  // 63 characters we can fit the address of the task that posted the current
  // task and its predecessor. Avoid HexEncode since it incurs a memory
  // allocation, and snprintf because it's about 3.5x slower on Android than
  // this.
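  // The resulting value looks like, e.g., "0x11223344 0x55667788" (addresses
  // illustrative): the first address is where the current task was posted
  // from; the second, |task_backtrace[0]|, is its predecessor's posting site.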
  //
  // See
  // https://chromium.googlesource.com/chromium/src/+/main/docs/debugging_with_crash_keys.md
  // for instructions for symbolizing these crash keys.
  //
  // TODO(skyostil): Find a way to extract the destination function address
  // from the task.
  size_t max_size = main_thread_only().async_stack_buffer.size();
  char* const buffer = &main_thread_only().async_stack_buffer[0];
  char* const buffer_end = &buffer[max_size - 1];
  char* pos = buffer_end;
  // Leave space for the NUL terminator.
  pos = PrependHexAddress(pos - 1, pending_task.task_backtrace[0]);
  *(--pos) = ' ';
  pos = PrependHexAddress(pos - 1, pending_task.posted_from.program_counter());
  DCHECK_GE(pos, buffer);
  debug::SetCrashKeyString(
      main_thread_only().async_stack_crash_key,
      std::string_view(pos, static_cast<size_t>(buffer_end - pos)));
#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_ANDROID)
}

internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
    const {
  if (main_thread_only().task_execution_stack.empty())
    return nullptr;
  return main_thread_only().task_execution_stack.rbegin()->task_queue;
}

TaskQueue::QueuePriority SequenceManagerImpl::GetPriorityCount() const {
  return settings().priority_settings.priority_count();
}

constexpr TimeDelta SequenceManagerImpl::kReclaimMemoryInterval;

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base
1257