/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#include "include/bt_target.h"

#define LOG_TAG "bt_osi_alarm"

#include "osi/include/alarm.h"

#include <base/logging.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include <hardware/bluetooth.h>

#include <mutex>

#include "osi/include/allocator.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/list.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"
#include "osi/include/semaphore.h"
#include "osi/include/thread.h"
#include "osi/include/wakelock.h"

// Callback and timer threads should run at RT priority in order to ensure they
// meet audio deadlines. Use this priority for all audio/timer related threads.
static const int THREAD_RT_PRIORITY = 1;

typedef struct {
  size_t count;
  period_ms_t total_ms;
  period_ms_t max_ms;
} stat_t;

// Alarm-related information and statistics
typedef struct {
  const char* name;
  size_t scheduled_count;
  size_t canceled_count;
  size_t rescheduled_count;
  size_t total_updates;
  period_ms_t last_update_ms;
  stat_t callback_execution;
  stat_t overdue_scheduling;
  stat_t premature_scheduling;
} alarm_stats_t;

struct alarm_t {
  // The mutex is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // mutex to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  std::recursive_mutex* callback_mutex;
  period_ms_t creation_time;
  period_ms_t period;
  period_ms_t deadline;
  period_ms_t prev_deadline;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t* queue;  // The processing queue to add this alarm to
  alarm_callback_t callback;
  void* data;
  alarm_stats_t stats;
};
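
// Illustrative usage sketch (not part of the original module; |my_timeout_cb|
// and |my_data| are hypothetical names). A one-shot alarm is typically driven
// through the public API declared in osi/include/alarm.h and defined below:
//
//   static void my_timeout_cb(void* data) {
//     // Runs on the default callback thread unless a queue was supplied via
//     // alarm_set_on_queue().
//   }
//
//   alarm_t* t = alarm_new("my_module.timeout");
//   alarm_set(t, 1000 /* ms */, my_timeout_cb, my_data);
//   ...
//   alarm_cancel(t);  // Returns only after any in-flight callback finishes.
//   alarm_free(t);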

// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

#if (KERNEL_MISSING_CLOCK_BOOTTIME_ALARM == TRUE)
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME;
#else
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME_ALARM;
#endif

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static std::mutex alarms_mutex;
static list_t* alarms;
static timer_t timer;
static timer_t wakeup_timer;
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t* dispatcher_thread;
static bool dispatcher_thread_active;
static semaphore_t* alarm_expired;

// Default alarm callback thread and queue
static thread_t* default_callback_thread;
static fixed_queue_t* default_callback_queue;

static alarm_t* alarm_new_internal(const char* name, bool is_periodic);
static bool lazy_initialize(void);
static period_ms_t now(void);
static void alarm_set_internal(alarm_t* alarm, period_ms_t period,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue);
static void alarm_cancel_internal(alarm_t* alarm);
static void remove_pending_alarm(alarm_t* alarm);
static void schedule_next_instance(alarm_t* alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t* queue, void* context);
static void timer_callback(void* data);
static void callback_dispatch(void* context);
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer);
static void update_scheduling_stats(alarm_stats_t* stats, period_ms_t now_ms,
                                    period_ms_t deadline_ms,
                                    period_ms_t execution_delta_ms);

static void update_stat(stat_t* stat, period_ms_t delta) {
  if (stat->max_ms < delta) stat->max_ms = delta;
  stat->total_ms += delta;
  stat->count++;
}

alarm_t* alarm_new(const char* name) { return alarm_new_internal(name, false); }

alarm_t* alarm_new_periodic(const char* name) {
  return alarm_new_internal(name, true);
}

static alarm_t* alarm_new_internal(const char* name, bool is_periodic) {
  // Make sure we have a list we can insert alarms into.
  if (!alarms && !lazy_initialize()) {
    CHECK(false);  // if initialization failed, we should not continue
    return NULL;
  }

  alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));

  ret->callback_mutex = new std::recursive_mutex;
  ret->is_periodic = is_periodic;
  ret->stats.name = osi_strdup(name);
  // NOTE: The stats were reset by osi_calloc() above

  return ret;
}

void alarm_free(alarm_t* alarm) {
  if (!alarm) return;

  alarm_cancel(alarm);
  delete alarm->callback_mutex;
  osi_free((void*)alarm->stats.name);
  osi_free(alarm);
}

period_ms_t alarm_get_remaining_ms(const alarm_t* alarm) {
  CHECK(alarm != NULL);
  period_ms_t remaining_ms = 0;
  period_ms_t just_now = now();

  std::lock_guard<std::mutex> lock(alarms_mutex);
  if (alarm->deadline > just_now) remaining_ms = alarm->deadline - just_now;

  return remaining_ms;
}

void alarm_set(alarm_t* alarm, period_ms_t interval_ms, alarm_callback_t cb,
               void* data) {
  alarm_set_on_queue(alarm, interval_ms, cb, data, default_callback_queue);
}

void alarm_set_on_queue(alarm_t* alarm, period_ms_t interval_ms,
                        alarm_callback_t cb, void* data, fixed_queue_t* queue) {
  CHECK(queue != NULL);
  alarm_set_internal(alarm, interval_ms, cb, data, queue);
}

// Runs in exclusion with alarm_cancel and timer_callback.
static void alarm_set_internal(alarm_t* alarm, period_ms_t period,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue) {
  CHECK(alarms != NULL);
  CHECK(alarm != NULL);
  CHECK(cb != NULL);

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarm->creation_time = now();
  alarm->period = period;
  alarm->queue = queue;
  alarm->callback = cb;
  alarm->data = data;

  schedule_next_instance(alarm);
  alarm->stats.scheduled_count++;
}

void alarm_cancel(alarm_t* alarm) {
  CHECK(alarms != NULL);
  if (!alarm) return;

  {
    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_cancel_internal(alarm);
  }

  // If the callback for |alarm| is in progress, wait here until it completes.
  std::lock_guard<std::recursive_mutex> lock(*alarm->callback_mutex);
}
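
// Because |callback_mutex| is a std::recursive_mutex, alarm_cancel() may be
// called from within the alarm's own callback without self-deadlocking: the
// callback thread already holds the mutex, so the final lock acquisition in
// alarm_cancel() succeeds immediately on that thread.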

// Internal implementation of canceling an alarm.
// The caller must hold the |alarms_mutex|
static void alarm_cancel_internal(alarm_t* alarm) {
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);

  remove_pending_alarm(alarm);

  alarm->deadline = 0;
  alarm->prev_deadline = 0;
  alarm->callback = NULL;
  alarm->data = NULL;
  alarm->stats.canceled_count++;
  alarm->queue = NULL;

  if (needs_reschedule) reschedule_root_alarm();
}

bool alarm_is_scheduled(const alarm_t* alarm) {
  if ((alarms == NULL) || (alarm == NULL)) return false;
  return (alarm->callback != NULL);
}

void alarm_cleanup(void) {
  // If lazy_initialize never ran there is nothing else to do
  if (!alarms) return;

  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;
}

static bool lazy_initialize(void) {
  CHECK(alarms == NULL);

  // timer_t doesn't have an invalid value so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarms = list_new(NULL);
  if (!alarms) {
    LOG_ERROR(LOG_TAG, "%s unable to allocate alarm list.", __func__);
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer)) goto error;
  timer_initialized = true;

  if (!timer_create_internal(CLOCK_ID_ALARM, &wakeup_timer)) goto error;
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm expired semaphore", __func__);
    goto error;
  }

  default_callback_thread =
      thread_new_sized("alarm_default_callbacks", SIZE_MAX);
  if (default_callback_thread == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks thread.",
              __func__);
    goto error;
  }
  thread_set_rt_priority(default_callback_thread, THREAD_RT_PRIORITY);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks queue.",
              __func__);
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue,
                                  default_callback_thread);

  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm callback thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(dispatcher_thread, THREAD_RT_PRIORITY);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized) timer_delete(wakeup_timer);

  if (timer_initialized) timer_delete(timer);

  list_free(alarms);
  alarms = NULL;

  return false;
}

static period_ms_t now(void) {
  CHECK(alarms != NULL);

  struct timespec ts;
  if (clock_gettime(CLOCK_ID, &ts) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to get current time: %s", __func__,
              strerror(errno));
    return 0;
  }

  return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}
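
// For example, ts = {.tv_sec = 12, .tv_nsec = 345678901} yields
// 12 * 1000 + 345 = 12345 ms on the CLOCK_BOOTTIME timeline.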

// Remove alarm from internal alarm list and the processing queue
// The caller must hold the |alarms_mutex|
static void remove_pending_alarm(alarm_t* alarm) {
  list_remove(alarms, alarm);
  while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
    // Remove all repeated alarm instances from the queue.
    // NOTE: We are defensive here - we shouldn't have repeated alarm instances
  }
}

// Must be called with |alarms_mutex| held
static void schedule_next_instance(alarm_t* alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback) remove_pending_alarm(alarm);

  // Calculate the next deadline for this alarm
  period_ms_t just_now = now();
  period_ms_t ms_into_period = 0;
  if ((alarm->is_periodic) && (alarm->period != 0))
    ms_into_period = ((just_now - alarm->creation_time) % alarm->period);
  alarm->deadline = just_now + (alarm->period - ms_into_period);

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) ||
      ((alarm_t*)list_front(alarms))->deadline > alarm->deadline) {
    list_prepend(alarms, alarm);
  } else {
    for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
         node = list_next(node)) {
      list_node_t* next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t*)list_node(next))->deadline > alarm->deadline) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule ||
      (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}
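
// Worked example of the deadline computation above for a periodic alarm:
// with creation_time = 0 ms, period = 100 ms and just_now = 250 ms,
// ms_into_period = (250 - 0) % 100 = 50, so the next deadline is
// 250 + (100 - 50) = 300 ms. Expirations therefore stay phase-aligned with
// the creation time instead of drifting by dispatch latency.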

// NOTE: must be called with |alarms_mutex| held
static void reschedule_root_alarm(void) {
  CHECK(alarms != NULL);

  const bool timer_was_set = timer_set;
  alarm_t* next;
  int64_t next_expiration;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms)) goto done;

  next = static_cast<alarm_t*>(list_front(alarms));
  next_expiration = next->deadline - now();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    if (!timer_set) {
      if (!wakelock_acquire()) {
        LOG_ERROR(LOG_TAG, "%s unable to acquire wake lock", __func__);
        goto done;
      }
    }

    timer_time.it_value.tv_sec = (next->deadline / 1000);
    timer_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1)
      LOG_ERROR(LOG_TAG, "%s unable to set wakeup timer: %s", __func__,
                strerror(errno));
  }

done:
  timer_set =
      timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1)
    LOG_ERROR(LOG_TAG, "%s unable to set timer: %s", __func__, strerror(errno));

  // If the next expiration was in the past (e.g. a short timer that got
  // context switched), then the timer might have disarmed itself. Detect this
  // case and work around it by manually signalling the |alarm_expired|
  // semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 &&
        time_to_expire.it_value.tv_nsec == 0) {
      LOG_DEBUG(
          LOG_TAG,
          "%s alarm expiration too close for posix timers, switching to guns",
          __func__);
      semaphore_post(alarm_expired);
    }
  }
}
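
// The deadlines armed above are absolute milliseconds on CLOCK_BOOTTIME (the
// same timeline as now()), so a deadline of 123456 ms becomes
// it_value = {.tv_sec = 123, .tv_nsec = 456000000} under TIMER_ABSTIME.
// On a platform with a 64-bit time_t, the "largest possible time" used to
// park the wakeup timer is 2^62 seconds.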

void alarm_register_processing_queue(fixed_queue_t* queue, thread_t* thread) {
  CHECK(queue != NULL);
  CHECK(thread != NULL);

  fixed_queue_register_dequeue(queue, thread_get_reactor(thread),
                               alarm_queue_ready, NULL);
}

void alarm_unregister_processing_queue(fixed_queue_t* queue) {
  CHECK(alarms != NULL);
  CHECK(queue != NULL);

  fixed_queue_unregister_dequeue(queue);

  // Cancel all alarms that are using this queue
  std::lock_guard<std::mutex> lock(alarms_mutex);
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms);) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    node = list_next(node);
    // TODO: Each module is responsible for tearing down its alarms; currently,
    // this is not the case. In the future, this check should be replaced by
    // an assert.
    if (alarm->queue == queue) alarm_cancel_internal(alarm);
  }
}

static void alarm_queue_ready(fixed_queue_t* queue, UNUSED_ATTR void* context) {
  CHECK(queue != NULL);

  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
  if (alarm == NULL) {
    return;  // The alarm was probably canceled
  }

  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  alarm_callback_t callback = alarm->callback;
  void* data = alarm->data;
  period_ms_t deadline = alarm->deadline;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline = alarm->prev_deadline;
  } else {
    alarm->deadline = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
    alarm->queue = NULL;
  }

  std::lock_guard<std::recursive_mutex> cb_lock(*alarm->callback_mutex);
  lock.unlock();

  period_ms_t t0 = now();
  callback(data);
  period_ms_t t1 = now();

  // Update the statistics
  CHECK(t1 >= t0);
  period_ms_t delta = t1 - t0;
  update_scheduling_stats(&alarm->stats, t0, deadline, delta);
}

// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void* ptr) {
  semaphore_post(alarm_expired);
}

// Function running on |dispatcher_thread| that performs the following:
// (1) Receives a signal using |alarm_expired| that the alarm has expired
// (2) Dispatches the alarm callback for processing by the corresponding
//     thread for that alarm.
static void callback_dispatch(UNUSED_ATTR void* context) {
  while (true) {
    semaphore_wait(alarm_expired);
    if (!dispatcher_thread_active) break;

    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_t* alarm;

    // Take into account that the alarm may get cancelled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Exit right away since there's nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline > now()) {
      reschedule_root_alarm();
      continue;
    }

    list_remove(alarms, alarm);

    if (alarm->is_periodic) {
      alarm->prev_deadline = alarm->deadline;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    fixed_queue_enqueue(alarm->queue, alarm);
  }

  LOG_DEBUG(LOG_TAG, "%s Callback thread exited", __func__);
}

static bool timer_create_internal(const clockid_t clock_id, timer_t* timer) {
  CHECK(timer != NULL);

  struct sigevent sigevent;
  // create timer with RT priority thread
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setschedpolicy(&thread_attr, SCHED_FIFO);
  struct sched_param param;
  param.sched_priority = THREAD_RT_PRIORITY;
  pthread_attr_setschedparam(&thread_attr, &param);

  memset(&sigevent, 0, sizeof(sigevent));
  sigevent.sigev_notify = SIGEV_THREAD;
  sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
  sigevent.sigev_notify_attributes = &thread_attr;
  if (timer_create(clock_id, &sigevent, timer) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to create timer with clock %d: %s", __func__,
              clock_id, strerror(errno));
    if (clock_id == CLOCK_BOOTTIME_ALARM) {
      LOG_ERROR(LOG_TAG,
                "The kernel might not have support for "
                "timer_create(CLOCK_BOOTTIME_ALARM): "
                "https://lwn.net/Articles/429925/");
      LOG_ERROR(LOG_TAG,
                "See following patches: "
                "https://git.kernel.org/cgit/linux/kernel/git/torvalds/"
                "linux.git/log/?qt=grep&q=CLOCK_BOOTTIME_ALARM");
    }
    return false;
  }

  return true;
}

static void update_scheduling_stats(alarm_stats_t* stats, period_ms_t now_ms,
                                    period_ms_t deadline_ms,
                                    period_ms_t execution_delta_ms) {
  stats->total_updates++;
  stats->last_update_ms = now_ms;

  update_stat(&stats->callback_execution, execution_delta_ms);

  if (deadline_ms < now_ms) {
    // Overdue scheduling
    period_ms_t delta_ms = now_ms - deadline_ms;
    update_stat(&stats->overdue_scheduling, delta_ms);
  } else if (deadline_ms > now_ms) {
    // Premature scheduling
    period_ms_t delta_ms = deadline_ms - now_ms;
    update_stat(&stats->premature_scheduling, delta_ms);
  }
}
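
// For example, an alarm with deadline_ms = 1000 whose callback starts at
// now_ms = 1012 records an overdue_scheduling delta of 12 ms; had it started
// at now_ms = 995, it would instead record a premature_scheduling delta of
// 5 ms.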

static void dump_stat(int fd, stat_t* stat, const char* description) {
  period_ms_t average_time_ms = 0;
  if (stat->count != 0) average_time_ms = stat->total_ms / stat->count;

  dprintf(fd, "%-51s: %llu / %llu / %llu\n", description,
          (unsigned long long)stat->total_ms, (unsigned long long)stat->max_ms,
          (unsigned long long)average_time_ms);
}

void alarm_debug_dump(int fd) {
  dprintf(fd, "\nBluetooth Alarms Statistics:\n");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  if (alarms == NULL) {
    dprintf(fd, "  None\n");
    return;
  }

  period_ms_t just_now = now();

  dprintf(fd, "  Total Alarms: %zu\n\n", list_length(alarms));

  // Dump info for each alarm
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
       node = list_next(node)) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    alarm_stats_t* stats = &alarm->stats;

    dprintf(fd, "  Alarm : %s (%s)\n", stats->name,
            (alarm->is_periodic) ? "PERIODIC" : "SINGLE");

    dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n",
            "    Action counts (sched/resched/exec/cancel)",
            stats->scheduled_count, stats->rescheduled_count,
            stats->callback_execution.count, stats->canceled_count);

    dprintf(fd, "%-51s: %zu / %zu\n",
            "    Deviation counts (overdue/premature)",
            stats->overdue_scheduling.count, stats->premature_scheduling.count);

    dprintf(fd, "%-51s: %llu / %llu / %lld\n",
            "    Time in ms (since creation/interval/remaining)",
            (unsigned long long)(just_now - alarm->creation_time),
            (unsigned long long)alarm->period,
            (long long)(alarm->deadline - just_now));

    dump_stat(fd, &stats->callback_execution,
              "    Callback execution time in ms (total/max/avg)");

    dump_stat(fd, &stats->overdue_scheduling,
              "    Overdue scheduling time in ms (total/max/avg)");

    dump_stat(fd, &stats->premature_scheduling,
              "    Premature scheduling time in ms (total/max/avg)");

    dprintf(fd, "\n");
  }
}