1 // Copyright 2018 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "base/task/sequence_manager/sequence_manager_impl.h"
6 
7 #include <queue>
8 #include <vector>
9 
10 #include "base/bind.h"
11 #include "base/bit_cast.h"
12 #include "base/compiler_specific.h"
13 #include "base/debug/crash_logging.h"
14 #include "base/memory/ptr_util.h"
15 #include "base/metrics/histogram_macros.h"
16 #include "base/rand_util.h"
17 #include "base/task/sequence_manager/real_time_domain.h"
18 #include "base/task/sequence_manager/task_time_observer.h"
19 #include "base/task/sequence_manager/thread_controller_impl.h"
20 #include "base/task/sequence_manager/work_queue.h"
21 #include "base/task/sequence_manager/work_queue_sets.h"
22 #include "base/time/default_tick_clock.h"
23 #include "base/time/tick_clock.h"
24 #include "base/trace_event/trace_event.h"
25 #include "build/build_config.h"
26 
27 namespace base {
28 namespace sequence_manager {
29 
// Public factory: creates a SequenceManager bound to the current thread's
// MessageLoop. Thin wrapper over SequenceManagerImpl::CreateOnCurrentThread().
std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread() {
  return internal::SequenceManagerImpl::CreateOnCurrentThread();
}
33 
34 namespace internal {
35 
36 namespace {
37 
38 constexpr base::TimeDelta kLongTaskTraceEventThreshold =
39     base::TimeDelta::FromMilliseconds(50);
40 // Proportion of tasks which will record thread time for metrics.
41 const double kTaskSamplingRateForRecordingCPUTime = 0.01;
// Proportion of SequenceManagers which will record thread time for each task,
43 // enabling advanced metrics.
44 const double kThreadSamplingRateForRecordingCPUTime = 0.0001;
45 
46 // Magic value to protect against memory corruption and bail out
47 // early when detected.
48 constexpr int kMemoryCorruptionSentinelValue = 0xdeadbeef;
49 
SweepCanceledDelayedTasksInQueue(internal::TaskQueueImpl * queue,std::map<TimeDomain *,TimeTicks> * time_domain_now)50 void SweepCanceledDelayedTasksInQueue(
51     internal::TaskQueueImpl* queue,
52     std::map<TimeDomain*, TimeTicks>* time_domain_now) {
53   TimeDomain* time_domain = queue->GetTimeDomain();
54   if (time_domain_now->find(time_domain) == time_domain_now->end())
55     time_domain_now->insert(std::make_pair(time_domain, time_domain->Now()));
56   queue->SweepCanceledDelayedTasks(time_domain_now->at(time_domain));
57 }
58 
InitializeMetricRecordingSettings()59 SequenceManager::MetricRecordingSettings InitializeMetricRecordingSettings() {
60   bool cpu_time_recording_always_on =
61       base::RandDouble() < kThreadSamplingRateForRecordingCPUTime;
62   return SequenceManager::MetricRecordingSettings(
63       cpu_time_recording_always_on, kTaskSamplingRateForRecordingCPUTime);
64 }
65 
66 }  // namespace
67 
// Takes ownership of |controller|, which drives the run loop for this
// manager. Must be constructed on the thread the controller runs tasks on.
SequenceManagerImpl::SequenceManagerImpl(
    std::unique_ptr<internal::ThreadController> controller)
    : graceful_shutdown_helper_(new internal::GracefulQueueShutdownHelper()),
      controller_(std::move(controller)),
      metric_recording_settings_(InitializeMetricRecordingSettings()),
      memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
      weak_factory_(this) {
  // TODO(altimin): Create a sequence checker here.
  DCHECK(controller_->RunsTasksInCurrentSequence());

  // Pre-warm every tracing category used in this file so the first real
  // trace event doesn't pay the category-lookup cost.
  TRACE_EVENT_WARMUP_CATEGORY("sequence_manager");
  TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("sequence_manager"));
  TRACE_EVENT_WARMUP_CATEGORY(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"));
  TRACE_EVENT_WARMUP_CATEGORY(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots"));

  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
  // Observe the selector so OnTaskQueueEnabled() fires when queues unblock.
  main_thread_only().selector.SetTaskQueueSelectorObserver(this);

  RegisterTimeDomain(main_thread_only().real_time_domain.get());

  // Wire ourselves up as the controller's task source last, once the
  // selector and the default time domain are ready.
  controller_->SetSequencedTaskSource(this);
  controller_->AddNestingObserver(this);
}
94 
// Tears down all queues before detaching from the controller. Order matters:
// queues are unregistered while the selector/observer wiring is still intact,
// and the nesting observer is removed last.
SequenceManagerImpl::~SequenceManagerImpl() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);

  // TODO(altimin): restore default task runner automatically when
  // ThreadController is destroyed.
  controller_->RestoreDefaultTaskRunner();

  // Unregister every live queue so no new tasks can be posted to them.
  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
    main_thread_only().selector.RemoveQueue(queue);
    queue->UnregisterTaskQueue();
  }

  main_thread_only().active_queues.clear();
  main_thread_only().queues_to_gracefully_shutdown.clear();

  // Tell the (possibly cross-thread) helper we are gone so late graceful
  // shutdown requests don't dangle.
  graceful_shutdown_helper_->OnSequenceManagerDeleted();

  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);
  controller_->RemoveNestingObserver(this);
}
116 
// State accessible from any thread (guarded by |any_thread_lock_|).
SequenceManagerImpl::AnyThread::AnyThread() = default;

SequenceManagerImpl::AnyThread::~AnyThread() = default;

// Main-thread-only state. Seeds the per-manager RNG used by
// ShouldRecordCPUTimeForTask() and creates the default real-time domain.
SequenceManagerImpl::MainThreadOnly::MainThreadOnly()
    : random_generator(RandUint64()),
      uniform_distribution(0.0, 1.0),
      real_time_domain(new internal::RealTimeDomain()) {}

SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
127 
128 // static
// static
// Creates a SequenceManagerImpl driven by a ThreadControllerImpl bound to
// the current thread's MessageLoop and the default tick clock.
std::unique_ptr<SequenceManagerImpl>
SequenceManagerImpl::CreateOnCurrentThread() {
  return WrapUnique(
      new SequenceManagerImpl(internal::ThreadControllerImpl::Create(
          MessageLoop::current(), DefaultTickClock::GetInstance())));
}
135 
// Adds |time_domain| to the set consulted by WakeUpReadyDelayedQueues() and
// DelayTillNextTask(), and tells the domain who its manager is.
void SequenceManagerImpl::RegisterTimeDomain(TimeDomain* time_domain) {
  main_thread_only().time_domains.insert(time_domain);
  time_domain->OnRegisterWithSequenceManager(this);
}
140 
// Removes |time_domain| from the registered set; the caller retains
// ownership of the domain.
void SequenceManagerImpl::UnregisterTimeDomain(TimeDomain* time_domain) {
  main_thread_only().time_domains.erase(time_domain);
}
144 
// Returns the built-in wall-clock time domain owned by this manager.
TimeDomain* SequenceManagerImpl::GetRealTimeDomain() const {
  return main_thread_only().real_time_domain.get();
}
148 
149 std::unique_ptr<internal::TaskQueueImpl>
CreateTaskQueueImpl(const TaskQueue::Spec & spec)150 SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
151   DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
152   TimeDomain* time_domain = spec.time_domain
153                                 ? spec.time_domain
154                                 : main_thread_only().real_time_domain.get();
155   DCHECK(main_thread_only().time_domains.find(time_domain) !=
156          main_thread_only().time_domains.end());
157   std::unique_ptr<internal::TaskQueueImpl> task_queue =
158       std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
159   main_thread_only().active_queues.insert(task_queue.get());
160   main_thread_only().selector.AddQueue(task_queue.get());
161   return task_queue;
162 }
163 
// Sets (or clears, with nullptr) the observer notified on nested run loop
// entry/exit. Main thread only.
void SequenceManagerImpl::SetObserver(Observer* observer) {
  main_thread_only().observer = observer;
}
167 
// Pushes |task_queue| onto the cross-thread intrusive list of queues with
// fresh immediate work. Returns true if the queue was newly added, false if
// it was already on the list (so callers can avoid redundant ScheduleWork).
// Callable from any thread; guarded by |any_thread_lock_|.
bool SequenceManagerImpl::AddToIncomingImmediateWorkList(
    internal::TaskQueueImpl* task_queue,
    internal::EnqueueOrder enqueue_order) {
  AutoLock lock(any_thread_lock_);
  // Check if |task_queue| is already in the linked list.
  if (task_queue->immediate_work_list_storage()->queue)
    return false;

  // Insert into the linked list. The list node is embedded in the queue
  // itself, so no allocation happens here.
  task_queue->immediate_work_list_storage()->queue = task_queue;
  task_queue->immediate_work_list_storage()->order = enqueue_order;
  task_queue->immediate_work_list_storage()->next =
      any_thread().incoming_immediate_work_list;
  any_thread().incoming_immediate_work_list =
      task_queue->immediate_work_list_storage();
  return true;
}
185 
// Unlinks |task_queue| from the incoming-immediate-work list if present.
// Uses a pointer-to-pointer walk so removal of the head needs no special
// case. O(n); expected to be called rarely (queue unregistration).
void SequenceManagerImpl::RemoveFromIncomingImmediateWorkList(
    internal::TaskQueueImpl* task_queue) {
  AutoLock lock(any_thread_lock_);
  internal::IncomingImmediateWorkList** prev =
      &any_thread().incoming_immediate_work_list;
  while (*prev) {
    if ((*prev)->queue == task_queue) {
      *prev = (*prev)->next;
      break;
    }
    prev = &(*prev)->next;
  }

  // Reset the embedded node unconditionally so the queue reads as "not on
  // the list" even if it wasn't found above.
  task_queue->immediate_work_list_storage()->next = nullptr;
  task_queue->immediate_work_list_storage()->queue = nullptr;
}
202 
// Takes ownership of |task_queue|, detaches it from the selector and the
// cross-thread work list, and parks it in |queues_to_delete| until it is
// safe to free. Main thread only.
void SequenceManagerImpl::UnregisterTaskQueueImpl(
    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1("sequence_manager", "SequenceManagerImpl::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);

  main_thread_only().selector.RemoveQueue(task_queue.get());

  // After UnregisterTaskQueue returns no new tasks can be posted.
  // It's important to call it first to avoid race condition between removing
  // the task queue from various lists here and adding it to the same lists
  // when posting a task.
  task_queue->UnregisterTaskQueue();

  // Remove |task_queue| from the linked list if present.
  // This is O(n).  We assume this will be a relatively infrequent operation.
  RemoveFromIncomingImmediateWorkList(task_queue.get());

  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can prevent
  // it from being freed while any of our structures hold a raw pointer to
  // it.
  main_thread_only().active_queues.erase(task_queue.get());
  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
}
227 
ReloadEmptyWorkQueues()228 void SequenceManagerImpl::ReloadEmptyWorkQueues() {
229   // There are two cases where a queue needs reloading.  First, it might be
230   // completely empty and we've just posted a task (this method handles that
231   // case). Secondly if the work queue becomes empty in when calling
232   // WorkQueue::TakeTaskFromWorkQueue (handled there).
233   for (internal::TaskQueueImpl* queue : main_thread_only().queues_to_reload) {
234     queue->ReloadImmediateWorkQueueIfEmpty();
235   }
236 }
237 
WakeUpReadyDelayedQueues(LazyNow * lazy_now)238 void SequenceManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
239   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
240                "SequenceManagerImpl::WakeUpReadyDelayedQueues");
241 
242   for (TimeDomain* time_domain : main_thread_only().time_domains) {
243     if (time_domain == main_thread_only().real_time_domain.get()) {
244       time_domain->WakeUpReadyDelayedQueues(lazy_now);
245     } else {
246       LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
247       time_domain->WakeUpReadyDelayedQueues(&time_domain_lazy_now);
248     }
249   }
250 }
251 
// NestingObserver callback: a nested run loop started on this thread.
// Tracks depth so non-nestable tasks can be deferred until we unwind.
void SequenceManagerImpl::OnBeginNestedRunLoop() {
  main_thread_only().nesting_depth++;
  if (main_thread_only().observer)
    main_thread_only().observer->OnBeginNestedRunLoop();
}
257 
// NestingObserver callback: a nested run loop exited. When we return to the
// outermost loop, requeues any non-nestable tasks deferred while nested.
void SequenceManagerImpl::OnExitNestedRunLoop() {
  main_thread_only().nesting_depth--;
  DCHECK_GE(main_thread_only().nesting_depth, 0);
  if (main_thread_only().nesting_depth == 0) {
    // While we were nested some non-nestable tasks may have been deferred.
    // We push them back onto the *front* of their original work queues,
    // that's why we iterate |non_nestable_task_queue| in FIFO order.
    while (!main_thread_only().non_nestable_task_queue.empty()) {
      internal::TaskQueueImpl::DeferredNonNestableTask& non_nestable_task =
          main_thread_only().non_nestable_task_queue.back();
      non_nestable_task.task_queue->RequeueDeferredNonNestableTask(
          std::move(non_nestable_task));
      main_thread_only().non_nestable_task_queue.pop_back();
    }
  }
  if (main_thread_only().observer)
    main_thread_only().observer->OnExitNestedRunLoop();
}
276 
// Called (possibly cross-thread) when |queue| receives immediate work.
// Schedules a DoWork only if the queue was newly added to the incoming list
// and isn't blocked — avoids redundant wake-ups.
void SequenceManagerImpl::OnQueueHasIncomingImmediateWork(
    internal::TaskQueueImpl* queue,
    internal::EnqueueOrder enqueue_order,
    bool queue_is_blocked) {
  if (AddToIncomingImmediateWorkList(queue, enqueue_order) && !queue_is_blocked)
    controller_->ScheduleWork();
}
284 
// Requests an immediate DoWork from the controller. |from_here| is currently
// unused but kept for the interface.
void SequenceManagerImpl::MaybeScheduleImmediateWork(
    const Location& from_here) {
  controller_->ScheduleWork();
}
289 
// Forwards the next delayed wake-up time to the controller.
void SequenceManagerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
                                               TimeTicks run_time) {
  controller_->SetNextDelayedDoWork(lazy_now, run_time);
}
294 
// Selects and returns the next task to run, or nullopt when there is no
// eligible work. Also performs the bookkeeping that must precede a task run:
// reloading empty work queues, waking due delayed queues, deferring
// non-nestable tasks while nested, and pushing the chosen task onto the
// execution stack (popped later by DidRunTask()).
Optional<PendingTask> SequenceManagerImpl::TakeTask() {
  CHECK(Validate());

  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  TRACE_EVENT0("sequence_manager", "SequenceManagerImpl::TakeTask");

  {
    AutoLock lock(any_thread_lock_);
    main_thread_only().queues_to_reload.clear();

    // Drain the cross-thread incoming-work list into the main-thread-only
    // |queues_to_reload| vector while holding the lock.
    for (internal::IncomingImmediateWorkList* iter =
             any_thread().incoming_immediate_work_list;
         iter; iter = iter->next) {
      main_thread_only().queues_to_reload.push_back(iter->queue);
      iter->queue = nullptr;
    }

    any_thread().incoming_immediate_work_list = nullptr;
  }

  // It's important we call ReloadEmptyWorkQueues outside of the lock to
  // avoid a lock order inversion.
  ReloadEmptyWorkQueues();
  LazyNow lazy_now(controller_->GetClock());
  WakeUpReadyDelayedQueues(&lazy_now);

  // Loop until a runnable task is found; canceled and deferred tasks cause
  // another selector pass.
  while (true) {
    internal::WorkQueue* work_queue = nullptr;
    bool should_run =
        main_thread_only().selector.SelectWorkQueueToService(&work_queue);
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
        this, AsValueWithSelectorResult(should_run, work_queue));

    if (!should_run)
      return nullopt;

    // If the head task was canceled, remove it and run the selector again.
    if (work_queue->RemoveAllCanceledTasksFromFront())
      continue;

    if (work_queue->GetFrontTask()->nestable == Nestable::kNonNestable &&
        main_thread_only().nesting_depth > 0) {
      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
      // the additional delay should not be a problem.
      // Note because we don't delete queues while nested, it's perfectly OK to
      // store the raw pointer for |queue| here.
      internal::TaskQueueImpl::DeferredNonNestableTask deferred_task{
          work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
          work_queue->queue_type()};
      main_thread_only().non_nestable_task_queue.push_back(
          std::move(deferred_task));
      continue;
    }

    main_thread_only().task_execution_stack.emplace_back(
        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
        InitializeTaskTiming(work_queue->task_queue()));

    UMA_HISTOGRAM_COUNTS_1000("TaskQueueManager.ActiveQueuesCount",
                              main_thread_only().active_queues.size());

    ExecutingTask& executing_task =
        *main_thread_only().task_execution_stack.rbegin();
    NotifyWillProcessTask(&executing_task, &lazy_now);
    return std::move(executing_task.pending_task);
  }
}
363 
// Called by the controller after the task returned by TakeTask() ran.
// Notifies observers, pops the execution stack entry pushed by TakeTask(),
// and — only at the outermost run-loop level — garbage-collects queues.
void SequenceManagerImpl::DidRunTask() {
  LazyNow lazy_now(controller_->GetClock());
  ExecutingTask& executing_task =
      *main_thread_only().task_execution_stack.rbegin();
  NotifyDidProcessTask(&executing_task, &lazy_now);
  main_thread_only().task_execution_stack.pop_back();

  if (main_thread_only().nesting_depth == 0)
    CleanUpQueues();
}
374 
DelayTillNextTask(LazyNow * lazy_now)375 TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) {
376   DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
377 
378   // If the selector has non-empty queues we trivially know there is immediate
379   // work to be done.
380   if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
381     return TimeDelta();
382 
383   // Its possible the selectors state is dirty because ReloadEmptyWorkQueues
384   // hasn't been called yet. This check catches the case of fresh incoming work.
385   {
386     AutoLock lock(any_thread_lock_);
387     for (const internal::IncomingImmediateWorkList* iter =
388              any_thread().incoming_immediate_work_list;
389          iter; iter = iter->next) {
390       if (iter->queue->CouldTaskRun(iter->order))
391         return TimeDelta();
392     }
393   }
394 
395   // Otherwise we need to find the shortest delay, if any.  NB we don't need to
396   // call WakeUpReadyDelayedQueues because it's assumed DelayTillNextTask will
397   // return TimeDelta>() if the delayed task is due to run now.
398   TimeDelta delay_till_next_task = TimeDelta::Max();
399   for (TimeDomain* time_domain : main_thread_only().time_domains) {
400     Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
401     if (!delay)
402       continue;
403 
404     if (*delay < delay_till_next_task)
405       delay_till_next_task = *delay;
406   }
407   return delay_till_next_task;
408 }
409 
// Lets the controller annotate |pending_task| before it is enqueued.
void SequenceManagerImpl::WillQueueTask(
    internal::TaskQueueImpl::Task* pending_task) {
  controller_->WillQueueTask(pending_task);
}
414 
InitializeTaskTiming(internal::TaskQueueImpl * task_queue)415 TaskQueue::TaskTiming SequenceManagerImpl::InitializeTaskTiming(
416     internal::TaskQueueImpl* task_queue) {
417   bool records_wall_time =
418       (task_queue->GetShouldNotifyObservers() &&
419        main_thread_only().task_time_observers.might_have_observers()) ||
420       task_queue->RequiresTaskTiming();
421   bool records_thread_time = records_wall_time && ShouldRecordCPUTimeForTask();
422   return TaskQueue::TaskTiming(records_wall_time, records_thread_time);
423 }
424 
// Runs the pre-task pipeline for |executing_task|: crash keys, start-time
// capture, then (if the queue opted in) task observers, queue observers and
// task-time observers, in that order.
void SequenceManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
                                                LazyNow* time_before_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyWillProcessTaskObservers");
  if (executing_task->task_queue->GetQuiescenceMonitored())
    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;

#if !defined(OS_NACL)
  // Record the task's posting site so crash reports show what was running.
  debug::SetCrashKeyString(
      main_thread_only().file_name_crash_key,
      executing_task->pending_task.posted_from.file_name());
  debug::SetCrashKeyString(
      main_thread_only().function_name_crash_key,
      executing_task->pending_task.posted_from.function_name());
#endif  // OS_NACL

  executing_task->task_timing.RecordTaskStart(time_before_task);

  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.WillProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyWillProcessTask");
    executing_task->task_queue->NotifyWillProcessTask(
        executing_task->pending_task);
  }

  bool notify_time_observers =
      main_thread_only().task_time_observers.might_have_observers() ||
      executing_task->task_queue->RequiresTaskTiming();

  if (!notify_time_observers)
    return;

  // Time observers are only told about top-level (non-nested) tasks.
  if (main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.WillProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers)
      observer.WillProcessTask(executing_task->task_timing.start_time());
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskStarted");
    executing_task->task_queue->OnTaskStarted(executing_task->pending_task,
                                              executing_task->task_timing);
  }
}
481 
// Runs the post-task pipeline for |executing_task|: end-time capture, then
// (if the queue opted in) time observers, task observers and queue
// notifications, plus a LongTask trace event for slow top-level tasks.
void SequenceManagerImpl::NotifyDidProcessTask(ExecutingTask* executing_task,
                                               LazyNow* time_after_task) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
               "SequenceManagerImpl::NotifyDidProcessTaskObservers");

  executing_task->task_timing.RecordTaskEnd(time_after_task);

  const TaskQueue::TaskTiming& task_timing = executing_task->task_timing;

  if (!executing_task->task_queue->GetShouldNotifyObservers())
    return;

  // Time observers only hear about top-level tasks with wall time recorded.
  if (task_timing.has_wall_time() && main_thread_only().nesting_depth == 0) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskTimeObservers");
    for (auto& observer : main_thread_only().task_time_observers) {
      observer.DidProcessTask(task_timing.start_time(), task_timing.end_time());
    }
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.DidProcessTaskObservers");
    for (auto& observer : main_thread_only().task_observers)
      observer.DidProcessTask(executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueNotifyDidProcessTask");
    executing_task->task_queue->NotifyDidProcessTask(
        executing_task->pending_task);
  }

  {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
                 "SequenceManager.QueueOnTaskCompleted");
    if (task_timing.has_wall_time())
      executing_task->task_queue->OnTaskCompleted(executing_task->pending_task,
                                                  task_timing);
  }

  // TODO(altimin): Move this back to blink.
  if (task_timing.has_wall_time() &&
      task_timing.wall_duration() > kLongTaskTraceEventThreshold &&
      main_thread_only().nesting_depth == 0) {
    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
                         "duration", task_timing.wall_duration().InSecondsF());
  }
}
532 
// Sets how many tasks the controller may run per DoWork. Must be >= 1.
void SequenceManagerImpl::SetWorkBatchSize(int work_batch_size) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  DCHECK_GE(work_batch_size, 1);
  controller_->SetWorkBatchSize(work_batch_size);
}
538 
// Registers an observer notified before/after every task. Main thread only.
void SequenceManagerImpl::AddTaskObserver(
    MessageLoop::TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  main_thread_only().task_observers.AddObserver(task_observer);
}
544 
// Unregisters a task observer previously added with AddTaskObserver().
void SequenceManagerImpl::RemoveTaskObserver(
    MessageLoop::TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  main_thread_only().task_observers.RemoveObserver(task_observer);
}
550 
// Registers an observer of per-task start/end times. Main thread only.
void SequenceManagerImpl::AddTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  main_thread_only().task_time_observers.AddObserver(task_time_observer);
}
556 
// Unregisters an observer previously added with AddTaskTimeObserver().
void SequenceManagerImpl::RemoveTaskTimeObserver(
    TaskTimeObserver* task_time_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
}
562 
GetAndClearSystemIsQuiescentBit()563 bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
564   bool task_was_run =
565       main_thread_only().task_was_run_on_quiescence_monitored_queue;
566   main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
567   return !task_was_run;
568 }
569 
// Returns a fresh, monotonically increasing enqueue order number.
internal::EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
  return enqueue_order_generator_.GenerateNext();
}
573 
// Serializes the manager's state (queues, selector, time domains, pending
// cross-thread work) into a TracedValue for debug trace snapshots.
// |selected_work_queue| is only consulted when |should_run| is true.
std::unique_ptr<trace_event::ConvertableToTraceFormat>
SequenceManagerImpl::AsValueWithSelectorResult(
    bool should_run,
    internal::WorkQueue* selected_work_queue) const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  std::unique_ptr<trace_event::TracedValue> state(
      new trace_event::TracedValue());
  TimeTicks now = NowTicks();
  state->BeginArray("active_queues");
  for (auto* const queue : main_thread_only().active_queues)
    queue->AsValueInto(now, state.get());
  state->EndArray();
  state->BeginArray("queues_to_gracefully_shutdown");
  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
    pair.first->AsValueInto(now, state.get());
  state->EndArray();
  state->BeginArray("queues_to_delete");
  for (const auto& pair : main_thread_only().queues_to_delete)
    pair.first->AsValueInto(now, state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  main_thread_only().selector.AsValueInto(state.get());
  state->EndDictionary();
  if (should_run) {
    state->SetString("selected_queue",
                     selected_work_queue->task_queue()->GetName());
    state->SetString("work_queue_name", selected_work_queue->name());
  }

  state->BeginArray("time_domains");
  for (auto* time_domain : main_thread_only().time_domains)
    time_domain->AsValueInto(state.get());
  state->EndArray();
  {
    // The incoming-work list is shared with other threads; take the lock
    // just long enough to read the queue names.
    AutoLock lock(any_thread_lock_);
    state->BeginArray("has_incoming_immediate_work");
    for (const internal::IncomingImmediateWorkList* iter =
             any_thread().incoming_immediate_work_list;
         iter; iter = iter->next) {
      state->AppendString(iter->queue->GetName());
    }
    state->EndArray();
  }
  return std::move(state);
}
619 
// TaskQueueSelector::Observer callback: |queue| transitioned to enabled.
void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  DCHECK(queue->IsQueueEnabled());
  // Only schedule DoWork if there's something to do.
  if (queue->HasTaskToRunImmediately() && !queue->BlockedByFence())
    MaybeScheduleImmediateWork(FROM_HERE);
}
627 
SweepCanceledDelayedTasks()628 void SequenceManagerImpl::SweepCanceledDelayedTasks() {
629   std::map<TimeDomain*, TimeTicks> time_domain_now;
630   for (auto* const queue : main_thread_only().active_queues)
631     SweepCanceledDelayedTasksInQueue(queue, &time_domain_now);
632   for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
633     SweepCanceledDelayedTasksInQueue(pair.first, &time_domain_now);
634 }
635 
TakeQueuesToGracefullyShutdownFromHelper()636 void SequenceManagerImpl::TakeQueuesToGracefullyShutdownFromHelper() {
637   std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues =
638       graceful_shutdown_helper_->TakeQueues();
639   for (std::unique_ptr<internal::TaskQueueImpl>& queue : queues) {
640     main_thread_only().queues_to_gracefully_shutdown[queue.get()] =
641         std::move(queue);
642   }
643 }
644 
// Garbage-collects queues: unregisters any gracefully-shutting-down queue
// that has drained, then frees queues parked in |queues_to_delete|. Only
// called at the outermost run-loop level (see DidRunTask).
void SequenceManagerImpl::CleanUpQueues() {
  TakeQueuesToGracefullyShutdownFromHelper();

  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
    if (it->first->IsEmpty()) {
      UnregisterTaskQueueImpl(std::move(it->second));
      main_thread_only().active_queues.erase(it->first);
      // erase(it++): advance before erasing so the iterator stays valid.
      main_thread_only().queues_to_gracefully_shutdown.erase(it++);
    } else {
      ++it;
    }
  }
  main_thread_only().queues_to_delete.clear();
}
660 
// Returns the ref-counted helper queues use to request graceful shutdown,
// possibly from other threads.
scoped_refptr<internal::GracefulQueueShutdownHelper>
SequenceManagerImpl::GetGracefulQueueShutdownHelper() const {
  return graceful_shutdown_helper_;
}
665 
// Returns a weak pointer invalidated when this manager is destroyed.
WeakPtr<SequenceManagerImpl> SequenceManagerImpl::GetWeakPtr() {
  return weak_factory_.GetWeakPtr();
}
669 
SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner)670 void SequenceManagerImpl::SetDefaultTaskRunner(
671     scoped_refptr<SingleThreadTaskRunner> task_runner) {
672   controller_->SetDefaultTaskRunner(task_runner);
673 }
674 
// Returns the controller's tick clock (never owned by the caller).
const TickClock* SequenceManagerImpl::GetTickClock() const {
  return controller_->GetClock();
}
678 
// Convenience: current time according to the controller's clock.
TimeTicks SequenceManagerImpl::NowTicks() const {
  return controller_->GetClock()->NowTicks();
}
682 
// Randomly samples whether the upcoming task should record thread (CPU)
// time, at the configured per-task sampling rate. Always false on platforms
// without ThreadTicks support. Draws from the per-manager RNG, so calling
// this advances the random sequence.
bool SequenceManagerImpl::ShouldRecordCPUTimeForTask() {
  return ThreadTicks::IsSupported() &&
         main_thread_only().uniform_distribution(
             main_thread_only().random_generator) <
             metric_recording_settings_
                 .task_sampling_rate_for_recording_cpu_time;
}
690 
// Returns the sampling configuration chosen at construction time.
const SequenceManager::MetricRecordingSettings&
SequenceManagerImpl::GetMetricRecordingSettings() const {
  return metric_recording_settings_;
}
695 
// Optimization is disabled so the sentinel comparison can't be folded away;
// this check must actually read memory to detect corruption of |this|.
MSVC_DISABLE_OPTIMIZE()
bool SequenceManagerImpl::Validate() {
  return memory_corruption_sentinel_ == kMemoryCorruptionSentinelValue;
}
MSVC_ENABLE_OPTIMIZE()
701 
// Allocates the crash keys later populated by NotifyWillProcessTask() with
// the running task's posting location. May be called at most once; no-op on
// NaCl where crash keys are unavailable.
void SequenceManagerImpl::EnableCrashKeys(
    const char* file_name_crash_key_name,
    const char* function_name_crash_key_name) {
  DCHECK(!main_thread_only().file_name_crash_key);
  DCHECK(!main_thread_only().function_name_crash_key);
#if !defined(OS_NACL)
  main_thread_only().file_name_crash_key = debug::AllocateCrashKeyString(
      file_name_crash_key_name, debug::CrashKeySize::Size64);
  main_thread_only().function_name_crash_key = debug::AllocateCrashKeyString(
      function_name_crash_key_name, debug::CrashKeySize::Size64);
#endif  // OS_NACL
}
714 
currently_executing_task_queue() const715 internal::TaskQueueImpl* SequenceManagerImpl::currently_executing_task_queue()
716     const {
717   if (main_thread_only().task_execution_stack.empty())
718     return nullptr;
719   return main_thread_only().task_execution_stack.rbegin()->task_queue;
720 }
721 
722 }  // namespace internal
723 }  // namespace sequence_manager
724 }  // namespace base
725