/******************************************************************************
 *
 *  Copyright 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#include "internal_include/bt_target.h"

#define LOG_TAG "bt_osi_alarm"

#include "osi/include/alarm.h"

#include <base/cancelable_callback.h>
#include <base/logging.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include <hardware/bluetooth.h>

#include <mutex>

#include "osi/include/allocator.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/list.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"
#include "osi/include/semaphore.h"
#include "osi/include/thread.h"
#include "osi/include/wakelock.h"
#include "stack/include/btu.h"

using base::Bind;
using base::CancelableClosure;

// Callback and timer threads should run at RT priority in order to ensure
// they meet audio deadlines.  Use this priority for all audio/timer related
// threads.
static const int THREAD_RT_PRIORITY = 1;

typedef struct {
  size_t count;
  uint64_t total_ms;
  uint64_t max_ms;
} stat_t;

// Alarm-related information and statistics
typedef struct {
  const char* name;
  size_t scheduled_count;
  size_t canceled_count;
  size_t rescheduled_count;
  size_t total_updates;
  uint64_t last_update_ms;
  stat_t overdue_scheduling;
  stat_t premature_scheduling;
} alarm_stats_t;

/* Wrapper around CancelableClosure that lets it be embedded in structs without
 * the need to define a copy operator. */
struct CancelableClosureInStruct {
  base::CancelableClosure i;

  CancelableClosureInStruct& operator=(const CancelableClosureInStruct& in) {
    if (!in.i.callback().is_null()) i.Reset(in.i.callback());
    return *this;
  }
};

struct alarm_t {
  // The mutex is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // mutex to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  std::shared_ptr<std::recursive_mutex> callback_mutex;
  uint64_t creation_time_ms;
  uint64_t period_ms;
  uint64_t deadline_ms;
  uint64_t prev_deadline_ms;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t* queue;  // The processing queue to add this alarm to
  alarm_callback_t callback;
  void* data;
  alarm_stats_t stats;

  bool for_msg_loop;  // True if the alarm should be processed on the message
                      // loop
  CancelableClosureInStruct closure;  // posted to message loop for processing
};

// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static std::mutex alarms_mutex;
static list_t* alarms;
static timer_t timer;
static timer_t wakeup_timer;
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t* dispatcher_thread;
static bool dispatcher_thread_active;
static semaphore_t* alarm_expired;

// Default alarm callback thread and queue
static thread_t* default_callback_thread;
static fixed_queue_t* default_callback_queue;

static alarm_t* alarm_new_internal(const char* name, bool is_periodic);
static bool lazy_initialize(void);
static uint64_t now_ms(void);
static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop);
static void alarm_cancel_internal(alarm_t* alarm);
static void remove_pending_alarm(alarm_t* alarm);
static void schedule_next_instance(alarm_t* alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t* queue, void* context);
static void timer_callback(void* data);
static void callback_dispatch(void* context);
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer);
static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms,
                                    uint64_t deadline_ms);
// Registers |queue| for processing alarm callbacks on |thread|.
// |queue| may not be NULL. |thread| may not be NULL.
static void alarm_register_processing_queue(fixed_queue_t* queue,
                                            thread_t* thread);

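// Adds a single |delta_ms| sample to |stat|, updating its running count,
// total, and maximum.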
static void update_stat(stat_t* stat, uint64_t delta_ms) {
  if (stat->max_ms < delta_ms) stat->max_ms = delta_ms;
  stat->total_ms += delta_ms;
  stat->count++;
}

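// Illustrative usage of the public alarm API implemented below. This is a
// minimal, hypothetical sketch; |my_timer|, |on_timeout| and |context| are
// placeholder names, not symbols defined in this module:
//
//   alarm_t* my_timer = alarm_new("my_module.my_timer");
//   alarm_set(my_timer, 1000 /* interval_ms */, on_timeout, context);
//   ...
//   alarm_cancel(my_timer);  // Blocks until any in-flight callback finishes.
//   alarm_free(my_timer);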
alarm_t* alarm_new(const char* name) { return alarm_new_internal(name, false); }

alarm_t* alarm_new_periodic(const char* name) {
  return alarm_new_internal(name, true);
}

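// Allocates and initializes a new alarm, lazily initializing the alarm
// subsystem on first use. |name| is copied into the alarm's statistics.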
static alarm_t* alarm_new_internal(const char* name, bool is_periodic) {
  // Make sure we have a list we can insert alarms into.
  if (!alarms && !lazy_initialize()) {
    CHECK(false);  // if initialization failed, we should not continue
    return NULL;
  }

  alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));

  std::shared_ptr<std::recursive_mutex> ptr(new std::recursive_mutex());
  ret->callback_mutex = ptr;
  ret->is_periodic = is_periodic;
  ret->stats.name = osi_strdup(name);

  ret->for_msg_loop = false;
  // placement new
  new (&ret->closure) CancelableClosureInStruct();

  // NOTE: The stats were reset by osi_calloc() above

  return ret;
}

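// Cancels |alarm| if it is scheduled and releases all resources associated
// with it. No-op if |alarm| is NULL.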
void alarm_free(alarm_t* alarm) {
  if (!alarm) return;

  alarm_cancel(alarm);

  osi_free((void*)alarm->stats.name);
  alarm->closure.~CancelableClosureInStruct();
  alarm->callback_mutex.reset();
  osi_free(alarm);
}

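// Returns the number of milliseconds until |alarm| expires, or 0 if its
// deadline has already passed or it is not scheduled.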
uint64_t alarm_get_remaining_ms(const alarm_t* alarm) {
  CHECK(alarm != NULL);
  uint64_t remaining_ms = 0;
  uint64_t just_now_ms = now_ms();

  std::lock_guard<std::mutex> lock(alarms_mutex);
  if (alarm->deadline_ms > just_now_ms)
    remaining_ms = alarm->deadline_ms - just_now_ms;

  return remaining_ms;
}

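// Public setters: |alarm_set| dispatches the callback on the default callback
// thread, while |alarm_set_on_mloop| posts it to the main message loop. Both
// delegate to |alarm_set_internal|.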
void alarm_set(alarm_t* alarm, uint64_t interval_ms, alarm_callback_t cb,
               void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, default_callback_queue,
                     false);
}

void alarm_set_on_mloop(alarm_t* alarm, uint64_t interval_ms,
                        alarm_callback_t cb, void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, NULL, true);
}

// Runs in exclusion with alarm_cancel and timer_callback.
static void alarm_set_internal(alarm_t* alarm, uint64_t period_ms,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop) {
  CHECK(alarms != NULL);
  CHECK(alarm != NULL);
  CHECK(cb != NULL);

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarm->creation_time_ms = now_ms();
  alarm->period_ms = period_ms;
  alarm->queue = queue;
  alarm->callback = cb;
  alarm->data = data;
  alarm->for_msg_loop = for_msg_loop;

  schedule_next_instance(alarm);
  alarm->stats.scheduled_count++;
}

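// Cancels |alarm| and waits for any in-progress callback to complete before
// returning, so the caller knows the callback is no longer running.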
void alarm_cancel(alarm_t* alarm) {
  CHECK(alarms != NULL);
  if (!alarm) return;

  std::shared_ptr<std::recursive_mutex> local_mutex_ref;
  {
    std::lock_guard<std::mutex> lock(alarms_mutex);
    local_mutex_ref = alarm->callback_mutex;
    alarm_cancel_internal(alarm);
  }

  // If the callback for |alarm| is in progress, wait here until it completes.
  std::lock_guard<std::recursive_mutex> lock(*local_mutex_ref);
}

// Internal implementation of canceling an alarm.
// The caller must hold the |alarms_mutex|
static void alarm_cancel_internal(alarm_t* alarm) {
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);

  remove_pending_alarm(alarm);

  alarm->deadline_ms = 0;
  alarm->prev_deadline_ms = 0;
  alarm->callback = NULL;
  alarm->data = NULL;
  alarm->stats.canceled_count++;
  alarm->queue = NULL;

  if (needs_reschedule) reschedule_root_alarm();
}

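// Returns true if |alarm| is currently scheduled, i.e. it has a pending
// callback.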
bool alarm_is_scheduled(const alarm_t* alarm) {
  if ((alarms == NULL) || (alarm == NULL)) return false;
  return (alarm->callback != NULL);
}

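// Tears down the alarm subsystem: stops the dispatcher thread, frees the
// default callback thread and queue, deletes the POSIX timers, and releases
// the alarm list.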
void alarm_cleanup(void) {
  // If lazy_initialize never ran there is nothing else to do
  if (!alarms) return;

  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;
}

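// One-time initialization of the alarm subsystem: allocates the alarm list,
// creates the POSIX timers (falling back to CLOCK_BOOTTIME when
// CLOCK_BOOTTIME_ALARM is unavailable), and starts the default callback and
// dispatcher threads. Returns false and rolls everything back on failure.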
static bool lazy_initialize(void) {
  CHECK(alarms == NULL);

  // timer_t doesn't have an invalid value so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarms = list_new(NULL);
  if (!alarms) {
    LOG_ERROR("%s unable to allocate alarm list.", __func__);
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer)) goto error;
  timer_initialized = true;

  if (!timer_create_internal(CLOCK_BOOTTIME_ALARM, &wakeup_timer)) {
    if (!timer_create_internal(CLOCK_BOOTTIME, &wakeup_timer)) {
      goto error;
    }
  }
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    LOG_ERROR("%s unable to create alarm expired semaphore", __func__);
    goto error;
  }

  default_callback_thread =
      thread_new_sized("alarm_default_callbacks", SIZE_MAX);
  if (default_callback_thread == NULL) {
    LOG_ERROR("%s unable to create default alarm callbacks thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(default_callback_thread, THREAD_RT_PRIORITY);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    LOG_ERROR("%s unable to create default alarm callbacks queue.", __func__);
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue,
                                  default_callback_thread);

  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    LOG_ERROR("%s unable to create alarm callback thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(dispatcher_thread, THREAD_RT_PRIORITY);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized) timer_delete(wakeup_timer);

  if (timer_initialized) timer_delete(timer);

  list_free(alarms);
  alarms = NULL;

  return false;
}

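// Returns the current time in milliseconds on the |CLOCK_ID| clock
// (CLOCK_BOOTTIME), or 0 if the time could not be read.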
static uint64_t now_ms(void) {
  CHECK(alarms != NULL);

  struct timespec ts;
  if (clock_gettime(CLOCK_ID, &ts) == -1) {
    LOG_ERROR("%s unable to get current time: %s", __func__, strerror(errno));
    return 0;
  }

  return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}

// Remove alarm from internal alarm list and the processing queue
// The caller must hold the |alarms_mutex|
static void remove_pending_alarm(alarm_t* alarm) {
  list_remove(alarms, alarm);

  if (alarm->for_msg_loop) {
    alarm->closure.i.Cancel();
  } else {
    while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
      // Remove all repeated alarm instances from the queue.
      // NOTE: We are defensive here - we shouldn't have repeated alarm
      // instances
    }
  }
}

// Must be called with |alarms_mutex| held
static void schedule_next_instance(alarm_t* alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback) remove_pending_alarm(alarm);

  // Calculate the next deadline for this alarm
  uint64_t just_now_ms = now_ms();
  uint64_t ms_into_period = 0;
  if ((alarm->is_periodic) && (alarm->period_ms != 0))
    ms_into_period =
        ((just_now_ms - alarm->creation_time_ms) % alarm->period_ms);
  alarm->deadline_ms = just_now_ms + (alarm->period_ms - ms_into_period);

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) ||
      ((alarm_t*)list_front(alarms))->deadline_ms > alarm->deadline_ms) {
    list_prepend(alarms, alarm);
  } else {
    for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
         node = list_next(node)) {
      list_node_t* next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t*)list_node(next))->deadline_ms > alarm->deadline_ms) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule ||
      (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}

// NOTE: must be called with |alarms_mutex| held
static void reschedule_root_alarm(void) {
  CHECK(alarms != NULL);

  const bool timer_was_set = timer_set;
  alarm_t* next;
  int64_t next_expiration;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms)) goto done;

  next = static_cast<alarm_t*>(list_front(alarms));
  next_expiration = next->deadline_ms - now_ms();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    if (!timer_set) {
      if (!wakelock_acquire()) {
        LOG_ERROR("%s unable to acquire wake lock", __func__);
        goto done;
      }
    }

    timer_time.it_value.tv_sec = (next->deadline_ms / 1000);
    timer_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline_ms / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline_ms % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1)
      LOG_ERROR("%s unable to set wakeup timer: %s", __func__, strerror(errno));
  }

done:
  timer_set =
      timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1)
    LOG_ERROR("%s unable to set timer: %s", __func__, strerror(errno));

  // If the next expiration was in the past (e.g. a short timer that got
  // context switched) then the timer might have disarmed itself. Detect this
  // case and work around it by manually signalling the |alarm_expired|
  // semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 &&
        time_to_expire.it_value.tv_nsec == 0) {
      LOG_INFO(
          "%s alarm expiration too close for posix timers, switching to guns",
          __func__);
      semaphore_post(alarm_expired);
    }
  }
}

static void alarm_register_processing_queue(fixed_queue_t* queue,
                                            thread_t* thread) {
  CHECK(queue != NULL);
  CHECK(thread != NULL);

  fixed_queue_register_dequeue(queue, thread_get_reactor(thread),
                               alarm_queue_ready, NULL);
}

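// Common callback servicing path: runs the alarm's callback with the per-alarm
// |callback_mutex| held and |alarms_mutex| released, and updates the
// scheduling statistics. |lock| must hold |alarms_mutex| on entry.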
static void alarm_ready_generic(alarm_t* alarm,
                                std::unique_lock<std::mutex>& lock) {
  if (alarm == NULL) {
    return;  // The alarm was probably canceled
  }

  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  if (!alarm->callback) {
    LOG(FATAL) << __func__
               << ": timer callback is NULL! Name=" << alarm->stats.name;
  }
  alarm_callback_t callback = alarm->callback;
  void* data = alarm->data;
  uint64_t deadline_ms = alarm->deadline_ms;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline_ms = alarm->prev_deadline_ms;
  } else {
    alarm->deadline_ms = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
    alarm->queue = NULL;
  }

  // Increment the reference count of the mutex so it doesn't get freed
  // before the callback gets finished executing.
  std::shared_ptr<std::recursive_mutex> local_mutex_ref = alarm->callback_mutex;
  std::lock_guard<std::recursive_mutex> cb_lock(*local_mutex_ref);
  lock.unlock();

  // Update the statistics
  update_scheduling_stats(&alarm->stats, now_ms(), deadline_ms);

  // NOTE: Do NOT access "alarm" after the callback, as a safety precaution
  // in case the callback itself deleted the alarm.
  callback(data);
}

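// Entry point for alarms processed on the main message loop; runs when the
// alarm's posted closure fires.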
static void alarm_ready_mloop(alarm_t* alarm) {
  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_ready_generic(alarm, lock);
}

static void alarm_queue_ready(fixed_queue_t* queue, UNUSED_ATTR void* context) {
  CHECK(queue != NULL);

  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
  alarm_ready_generic(alarm, lock);
}

// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void* ptr) {
  semaphore_post(alarm_expired);
}

// Function running on |dispatcher_thread| that performs the following:
//   (1) Receives a signal via |alarm_expired| that an alarm has expired
//   (2) Dispatches the alarm callback for processing by the corresponding
//       thread for that alarm.
static void callback_dispatch(UNUSED_ATTR void* context) {
  while (true) {
    semaphore_wait(alarm_expired);
    if (!dispatcher_thread_active) break;

    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_t* alarm;

    // Take into account that the alarm may get cancelled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Exit right away since there's nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline_ms >
            now_ms()) {
      reschedule_root_alarm();
      continue;
    }

    list_remove(alarms, alarm);

    if (alarm->is_periodic) {
      alarm->prev_deadline_ms = alarm->deadline_ms;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    if (alarm->for_msg_loop) {
      if (!get_main_thread()) {
        LOG_ERROR("%s: message loop already NULL. Alarm: %s", __func__,
                  alarm->stats.name);
        continue;
      }

      alarm->closure.i.Reset(Bind(alarm_ready_mloop, alarm));
      get_main_thread()->DoInThread(FROM_HERE, alarm->closure.i.callback());
    } else {
      fixed_queue_enqueue(alarm->queue, alarm);
    }
  }

  LOG_INFO("%s Callback thread exited", __func__);
}

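// Creates a POSIX timer on |clock_id| whose expirations are delivered on a
// dedicated SCHED_FIFO thread that signals |alarm_expired| via
// |timer_callback|. Returns false if the timer could not be created.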
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer) {
  CHECK(timer != NULL);

  struct sigevent sigevent;
  // create timer with RT priority thread
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setschedpolicy(&thread_attr, SCHED_FIFO);
  struct sched_param param;
  param.sched_priority = THREAD_RT_PRIORITY;
  pthread_attr_setschedparam(&thread_attr, &param);

  memset(&sigevent, 0, sizeof(sigevent));
  sigevent.sigev_notify = SIGEV_THREAD;
  sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
  sigevent.sigev_notify_attributes = &thread_attr;
  if (timer_create(clock_id, &sigevent, timer) == -1) {
    LOG_ERROR("%s unable to create timer with clock %d: %s", __func__, clock_id,
              strerror(errno));
    if (clock_id == CLOCK_BOOTTIME_ALARM) {
      LOG_ERROR(
          "The kernel might not have support for "
          "timer_create(CLOCK_BOOTTIME_ALARM): "
          "https://lwn.net/Articles/429925/");
      LOG_ERROR(
          "See following patches: "
          "https://git.kernel.org/cgit/linux/kernel/git/torvalds/"
          "linux.git/log/?qt=grep&q=CLOCK_BOOTTIME_ALARM");
    }
    return false;
  }

  return true;
}

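// Records one callback dispatch in |stats|: bumps the update count and files
// the delta between |now_ms| and |deadline_ms| under overdue or premature
// scheduling as appropriate.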
static void update_scheduling_stats(alarm_stats_t* stats, uint64_t now_ms,
                                    uint64_t deadline_ms) {
  stats->total_updates++;
  stats->last_update_ms = now_ms;

  if (deadline_ms < now_ms) {
    // Overdue scheduling
    uint64_t delta_ms = now_ms - deadline_ms;
    update_stat(&stats->overdue_scheduling, delta_ms);
  } else if (deadline_ms > now_ms) {
    // Premature scheduling
    uint64_t delta_ms = deadline_ms - now_ms;
    update_stat(&stats->premature_scheduling, delta_ms);
  }
}

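// Writes a single "total / max / avg" line for |stat| to |fd|.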
static void dump_stat(int fd, stat_t* stat, const char* description) {
  uint64_t average_time_ms = 0;
  if (stat->count != 0) average_time_ms = stat->total_ms / stat->count;

  dprintf(fd, "%-51s: %llu / %llu / %llu\n", description,
          (unsigned long long)stat->total_ms, (unsigned long long)stat->max_ms,
          (unsigned long long)average_time_ms);
}

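// Dumps per-alarm scheduling statistics for all known alarms to |fd|.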
void alarm_debug_dump(int fd) {
  dprintf(fd, "\nBluetooth Alarms Statistics:\n");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  if (alarms == NULL) {
    dprintf(fd, "  None\n");
    return;
  }

  uint64_t just_now_ms = now_ms();

  dprintf(fd, "  Total Alarms: %zu\n\n", list_length(alarms));

  // Dump info for each alarm
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
       node = list_next(node)) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    alarm_stats_t* stats = &alarm->stats;

    dprintf(fd, "  Alarm : %s (%s)\n", stats->name,
            (alarm->is_periodic) ? "PERIODIC" : "SINGLE");

    dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n",
            "    Action counts (sched/resched/exec/cancel)",
            stats->scheduled_count, stats->rescheduled_count,
            stats->total_updates, stats->canceled_count);

    dprintf(fd, "%-51s: %zu / %zu\n",
            "    Deviation counts (overdue/premature)",
            stats->overdue_scheduling.count, stats->premature_scheduling.count);

    dprintf(fd, "%-51s: %llu / %llu / %lld\n",
            "    Time in ms (since creation/interval/remaining)",
            (unsigned long long)(just_now_ms - alarm->creation_time_ms),
            (unsigned long long)alarm->period_ms,
            (long long)(alarm->deadline_ms - just_now_ms));

    dump_stat(fd, &stats->overdue_scheduling,
              "    Overdue scheduling time in ms (total/max/avg)");

    dump_stat(fd, &stats->premature_scheduling,
              "    Premature scheduling time in ms (total/max/avg)");

    dprintf(fd, "\n");
  }
}