/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#include "include/bt_target.h"

#define LOG_TAG "bt_osi_alarm"

#include "osi/include/alarm.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include <hardware/bluetooth.h>

#include "osi/include/allocator.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/list.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"
#include "osi/include/semaphore.h"
#include "osi/include/thread.h"
#include "osi/include/wakelock.h"

// Make callbacks run at high thread priority. Some callbacks are used for audio
// related timer tasks as well as re-transmissions etc. Since we at this point
// cannot differentiate what callback we are dealing with, assume high priority
// for now.
// TODO(eisenbach): Determine correct thread priority (from parent?/per alarm?)
static const int CALLBACK_THREAD_PRIORITY_HIGH = -19;

typedef struct {
  size_t count;
  period_ms_t total_ms;
  period_ms_t max_ms;
} stat_t;

// Alarm-related information and statistics
typedef struct {
  const char *name;
  size_t scheduled_count;
  size_t canceled_count;
  size_t rescheduled_count;
  size_t total_updates;
  period_ms_t last_update_ms;
  stat_t callback_execution;
  stat_t overdue_scheduling;
  stat_t premature_scheduling;
} alarm_stats_t;

struct alarm_t {
  // The lock is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // lock to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  pthread_mutex_t callback_lock;
  period_ms_t creation_time;
  period_ms_t period;
  period_ms_t deadline;
  period_ms_t prev_deadline;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t *queue;       // The processing queue to add this alarm to
  alarm_callback_t callback;
  void *data;
  alarm_stats_t stats;
};
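
// Illustration only (hypothetical caller, not part of this module): typical
// single-shot use of the public API from osi/include/alarm.h. The callback
// name and timeout value below are assumptions made for this sketch.
//
//   alarm_t *timer = alarm_new("example.timer");
//   alarm_set(timer, 2000, example_timeout_cb, context);  // fires once in ~2s
//   ...
//   alarm_cancel(timer);  // returns only after any in-flight callback is done
//   alarm_free(timer);    // cancels implicitly and releases the alarm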

// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

#if defined(KERNEL_MISSING_CLOCK_BOOTTIME_ALARM) && (KERNEL_MISSING_CLOCK_BOOTTIME_ALARM == TRUE)
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME;
#else
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME_ALARM;
#endif

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static pthread_mutex_t monitor;
static list_t *alarms;
static timer_t timer;
static timer_t wakeup_timer;
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t *dispatcher_thread;
static bool dispatcher_thread_active;
static semaphore_t *alarm_expired;

// Default alarm callback thread and queue
static thread_t *default_callback_thread;
static fixed_queue_t *default_callback_queue;

static alarm_t *alarm_new_internal(const char *name, bool is_periodic);
static bool lazy_initialize(void);
static period_ms_t now(void);
static void alarm_set_internal(alarm_t *alarm, period_ms_t period,
                               alarm_callback_t cb, void *data,
                               fixed_queue_t *queue);
static void alarm_cancel_internal(alarm_t *alarm);
static void remove_pending_alarm(alarm_t *alarm);
static void schedule_next_instance(alarm_t *alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t *queue, void *context);
static void timer_callback(void *data);
static void callback_dispatch(void *context);
static bool timer_create_internal(const clockid_t clock_id, timer_t *timer);
static void update_scheduling_stats(alarm_stats_t *stats,
                                    period_ms_t now_ms,
                                    period_ms_t deadline_ms,
                                    period_ms_t execution_delta_ms);

static void update_stat(stat_t *stat, period_ms_t delta)
{
  if (stat->max_ms < delta)
    stat->max_ms = delta;
  stat->total_ms += delta;
  stat->count++;
}

alarm_t *alarm_new(const char *name) {
  return alarm_new_internal(name, false);
}

alarm_t *alarm_new_periodic(const char *name) {
  return alarm_new_internal(name, true);
}

static alarm_t *alarm_new_internal(const char *name, bool is_periodic) {
  // Make sure we have a list we can insert alarms into.
  if (!alarms && !lazy_initialize()) {
    assert(false);  // if initialization failed, we should not continue
    return NULL;
  }

  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);

  alarm_t *ret = osi_calloc(sizeof(alarm_t));

  // Make this a recursive mutex to make it safe to call |alarm_cancel| from
  // within the callback function of the alarm.
  int error = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  if (error) {
    LOG_ERROR(LOG_TAG, "%s unable to create a recursive mutex: %s",
              __func__, strerror(error));
    goto error;
  }

  error = pthread_mutex_init(&ret->callback_lock, &attr);
  if (error) {
    LOG_ERROR(LOG_TAG, "%s unable to initialize mutex: %s",
              __func__, strerror(error));
    goto error;
  }

  ret->is_periodic = is_periodic;

  alarm_stats_t *stats = &ret->stats;
  stats->name = osi_strdup(name);
  // NOTE: The stats were reset by osi_calloc() above

  pthread_mutexattr_destroy(&attr);
  return ret;

error:
  pthread_mutexattr_destroy(&attr);
  osi_free(ret);
  return NULL;
}
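
// Illustration only (hypothetical callback, not part of this module): the
// recursive |callback_lock| created above is what makes it safe for an alarm
// callback to cancel its own alarm. The callback runs with |callback_lock|
// already held (see alarm_queue_ready), and alarm_cancel() re-acquires that
// same lock, which would self-deadlock with a non-recursive mutex:
//
//   static void example_timeout_cb(void *data) {  // assumed name
//     alarm_t *self = (alarm_t *)data;
//     alarm_cancel(self);  // re-locks callback_lock on the same thread: OK
//   }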

void alarm_free(alarm_t *alarm) {
  if (!alarm)
    return;

  alarm_cancel(alarm);
  pthread_mutex_destroy(&alarm->callback_lock);
  osi_free((void *)alarm->stats.name);
  osi_free(alarm);
}

period_ms_t alarm_get_remaining_ms(const alarm_t *alarm) {
  assert(alarm != NULL);
  period_ms_t remaining_ms = 0;
  period_ms_t just_now = now();

  pthread_mutex_lock(&monitor);
  if (alarm->deadline > just_now)
    remaining_ms = alarm->deadline - just_now;
  pthread_mutex_unlock(&monitor);

  return remaining_ms;
}

void alarm_set(alarm_t *alarm, period_ms_t interval_ms,
               alarm_callback_t cb, void *data) {
  alarm_set_on_queue(alarm, interval_ms, cb, data, default_callback_queue);
}

void alarm_set_on_queue(alarm_t *alarm, period_ms_t interval_ms,
                        alarm_callback_t cb, void *data,
                        fixed_queue_t *queue) {
  assert(queue != NULL);
  alarm_set_internal(alarm, interval_ms, cb, data, queue);
}

// Runs in exclusion with alarm_cancel and timer_callback.
static void alarm_set_internal(alarm_t *alarm, period_ms_t period,
                               alarm_callback_t cb, void *data,
                               fixed_queue_t *queue) {
  assert(alarms != NULL);
  assert(alarm != NULL);
  assert(cb != NULL);

  pthread_mutex_lock(&monitor);

  alarm->creation_time = now();
  alarm->period = period;
  alarm->queue = queue;
  alarm->callback = cb;
  alarm->data = data;

  schedule_next_instance(alarm);
  alarm->stats.scheduled_count++;

  pthread_mutex_unlock(&monitor);
}

void alarm_cancel(alarm_t *alarm) {
  assert(alarms != NULL);
  if (!alarm)
    return;

  pthread_mutex_lock(&monitor);
  alarm_cancel_internal(alarm);
  pthread_mutex_unlock(&monitor);

  // If the callback for |alarm| is in progress, wait here until it completes.
  pthread_mutex_lock(&alarm->callback_lock);
  pthread_mutex_unlock(&alarm->callback_lock);
}
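
// Illustration only (not part of this module): why the lock/unlock pair above
// acts as a completion barrier. If the callback is already running on a
// callback thread, that thread holds |callback_lock| (acquired in
// alarm_queue_ready before |monitor| is released), so the canceling thread
// blocks until the callback returns:
//
//   callback thread                      thread calling alarm_cancel()
//   -------------------------------      -------------------------------
//   lock(callback_lock)
//   unlock(monitor)                      lock(monitor)
//   callback(data)  // long-running      alarm_cancel_internal(alarm)
//   ...                                  unlock(monitor)
//   ...                                  lock(callback_lock)    // blocks
//   unlock(callback_lock)                unlock(callback_lock)  // callback done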

// Internal implementation of canceling an alarm.
// The caller must hold the |monitor| lock.
static void alarm_cancel_internal(alarm_t *alarm) {
  bool needs_reschedule = (!list_is_empty(alarms) && list_front(alarms) == alarm);

  remove_pending_alarm(alarm);

  alarm->deadline = 0;
  alarm->prev_deadline = 0;
  alarm->callback = NULL;
  alarm->data = NULL;
  alarm->stats.canceled_count++;
  alarm->queue = NULL;

  if (needs_reschedule)
    reschedule_root_alarm();
}

bool alarm_is_scheduled(const alarm_t *alarm) {
  if ((alarms == NULL) || (alarm == NULL))
    return false;
  return (alarm->callback != NULL);
}

void alarm_cleanup(void) {
  // If lazy_initialize never ran, there is nothing to clean up.
  if (!alarms)
    return;

  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  pthread_mutex_lock(&monitor);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;

  pthread_mutex_unlock(&monitor);
  pthread_mutex_destroy(&monitor);
}

static bool lazy_initialize(void) {
  assert(alarms == NULL);

  // timer_t doesn't have an invalid value, so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  pthread_mutex_init(&monitor, NULL);

  alarms = list_new(NULL);
  if (!alarms) {
    LOG_ERROR(LOG_TAG, "%s unable to allocate alarm list.", __func__);
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer))
    goto error;
  timer_initialized = true;

  if (!timer_create_internal(CLOCK_ID_ALARM, &wakeup_timer))
    goto error;
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm expired semaphore", __func__);
    goto error;
  }

  default_callback_thread = thread_new_sized("alarm_default_callbacks",
                                             SIZE_MAX);
  if (default_callback_thread == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks thread.",
              __func__);
    goto error;
  }
  thread_set_priority(default_callback_thread, CALLBACK_THREAD_PRIORITY_HIGH);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks queue.",
              __func__);
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue,
                                  default_callback_thread);

  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm callback thread.", __func__);
    goto error;
  }

  thread_set_priority(dispatcher_thread, CALLBACK_THREAD_PRIORITY_HIGH);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized)
    timer_delete(wakeup_timer);

  if (timer_initialized)
    timer_delete(timer);

  list_free(alarms);
  alarms = NULL;

  pthread_mutex_destroy(&monitor);

  return false;
}

static period_ms_t now(void) {
  assert(alarms != NULL);

  struct timespec ts;
  if (clock_gettime(CLOCK_ID, &ts) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to get current time: %s",
              __func__, strerror(errno));
    return 0;
  }

  return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}

// Remove alarm from internal alarm list and the processing queue.
// The caller must hold the |monitor| lock.
static void remove_pending_alarm(alarm_t *alarm) {
  list_remove(alarms, alarm);
  while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
    // Remove all repeated alarm instances from the queue.
    // NOTE: We are defensive here - we shouldn't have repeated alarm instances
  }
}

// Must be called with the |monitor| lock held.
static void schedule_next_instance(alarm_t *alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule = (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback)
    remove_pending_alarm(alarm);

  // Calculate the next deadline for this alarm.
  period_ms_t just_now = now();
  period_ms_t ms_into_period = 0;
  if ((alarm->is_periodic) && (alarm->period != 0))
    ms_into_period = ((just_now - alarm->creation_time) % alarm->period);
  alarm->deadline = just_now + (alarm->period - ms_into_period);

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) ||
      ((alarm_t *)list_front(alarms))->deadline > alarm->deadline) {
    list_prepend(alarms, alarm);
  } else {
    for (list_node_t *node = list_begin(alarms); node != list_end(alarms);
         node = list_next(node)) {
      list_node_t *next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t *)list_node(next))->deadline > alarm->deadline) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule ||
      (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}
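
// Worked example (illustrative numbers only) for the deadline arithmetic in
// schedule_next_instance above: a periodic alarm created at
// creation_time = 1000 ms with period = 100 ms that is rescheduled at
// just_now = 1234 ms gives ms_into_period = (1234 - 1000) % 100 = 34 and
// deadline = 1234 + (100 - 34) = 1300 ms. Periodic deadlines therefore stay
// aligned to multiples of the period after the creation time instead of
// drifting by the callback's execution and dispatch latency.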

// NOTE: must be called with the |monitor| lock held.
static void reschedule_root_alarm(void) {
  assert(alarms != NULL);

  const bool timer_was_set = timer_set;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms))
    goto done;

  const alarm_t *next = list_front(alarms);
  const int64_t next_expiration = next->deadline - now();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    if (!timer_set) {
      if (!wakelock_acquire()) {
        LOG_ERROR(LOG_TAG, "%s unable to acquire wake lock", __func__);
        goto done;
      }
    }

    timer_time.it_value.tv_sec = (next->deadline / 1000);
    timer_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1)
      LOG_ERROR(LOG_TAG, "%s unable to set wakeup timer: %s",
                __func__, strerror(errno));
  }

done:
  timer_set = timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1)
    LOG_ERROR(LOG_TAG, "%s unable to set timer: %s", __func__, strerror(errno));

  // If the next expiration was in the past (e.g. a short timer that got
  // context switched), then the timer might have disarmed itself. Detect this
  // case and work around it by manually signalling the |alarm_expired|
  // semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 &&
        time_to_expire.it_value.tv_nsec == 0) {
      LOG_DEBUG(LOG_TAG,
                "%s alarm expiration too close for posix timers, switching to guns",
                __func__);
      semaphore_post(alarm_expired);
    }
  }
}
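
// Worked example (illustrative numbers only, assuming the default
// TIMER_INTERVAL_FOR_WAKELOCK_IN_MS of 3000): if the earliest deadline is
// 1200 ms away, reschedule_root_alarm() holds a wakelock and arms only the
// regular CLOCK_BOOTTIME timer, keeping the device awake briefly rather than
// suspending and resuming for such a short interval. If the earliest deadline
// is 10000 ms away, it arms the CLOCK_BOOTTIME_ALARM wakeup timer instead and
// releases any wakelock it was holding, letting the device suspend and be
// woken by the wake alarm.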

void alarm_register_processing_queue(fixed_queue_t *queue, thread_t *thread) {
  assert(queue != NULL);
  assert(thread != NULL);

  fixed_queue_register_dequeue(queue, thread_get_reactor(thread),
                               alarm_queue_ready, NULL);
}

void alarm_unregister_processing_queue(fixed_queue_t *queue) {
  assert(alarms != NULL);
  assert(queue != NULL);

  fixed_queue_unregister_dequeue(queue);

  // Cancel all alarms that are using this queue
  pthread_mutex_lock(&monitor);
  for (list_node_t *node = list_begin(alarms); node != list_end(alarms); ) {
    alarm_t *alarm = (alarm_t *)list_node(node);
    node = list_next(node);
    // TODO: Each module is responsible for tearing down its alarms; currently,
    // this is not the case. In the future, this check should be replaced by
    // an assert.
    if (alarm->queue == queue)
      alarm_cancel_internal(alarm);
  }
  pthread_mutex_unlock(&monitor);
}
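
// Illustration only (hypothetical module, not part of this file): routing
// alarm callbacks to a module-owned thread instead of the default callback
// thread. The queue, thread, and callback names are assumptions made for
// this sketch.
//
//   fixed_queue_t *module_queue = fixed_queue_new(SIZE_MAX);
//   thread_t *module_thread = thread_new("example_module_alarms");
//   alarm_register_processing_queue(module_queue, module_thread);
//
//   alarm_set_on_queue(alarm, 500, example_cb, context, module_queue);
//
//   // On teardown, this also cancels every alarm still using |module_queue|.
//   alarm_unregister_processing_queue(module_queue);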

static void alarm_queue_ready(fixed_queue_t *queue,
                              UNUSED_ATTR void *context) {
  assert(queue != NULL);

  pthread_mutex_lock(&monitor);
  alarm_t *alarm = (alarm_t *)fixed_queue_try_dequeue(queue);
  if (alarm == NULL) {
    pthread_mutex_unlock(&monitor);
    return;  // The alarm was probably canceled
  }

  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  alarm_callback_t callback = alarm->callback;
  void *data = alarm->data;
  period_ms_t deadline = alarm->deadline;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline = alarm->prev_deadline;
  } else {
    alarm->deadline = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
  }

  pthread_mutex_lock(&alarm->callback_lock);
  pthread_mutex_unlock(&monitor);

  period_ms_t t0 = now();
  callback(data);
  period_ms_t t1 = now();

  // Update the statistics
  assert(t1 >= t0);
  period_ms_t delta = t1 - t0;
  update_scheduling_stats(&alarm->stats, t0, deadline, delta);

  pthread_mutex_unlock(&alarm->callback_lock);
}

// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void *ptr) {
  semaphore_post(alarm_expired);
}

// Function running on |dispatcher_thread| that performs the following:
//   (1) Receives a signal via |alarm_expired| that an alarm has expired
//   (2) Dispatches the alarm callback for processing by the corresponding
//       thread for that alarm.
static void callback_dispatch(UNUSED_ATTR void *context) {
  while (true) {
    semaphore_wait(alarm_expired);
    if (!dispatcher_thread_active)
      break;

    pthread_mutex_lock(&monitor);
    alarm_t *alarm;

    // Take into account that the alarm may get canceled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Release the monitor lock and exit right away since there's
    // nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = list_front(alarms))->deadline > now()) {
      reschedule_root_alarm();
      pthread_mutex_unlock(&monitor);
      continue;
    }

    list_remove(alarms, alarm);

    if (alarm->is_periodic) {
      alarm->prev_deadline = alarm->deadline;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    fixed_queue_enqueue(alarm->queue, alarm);

    pthread_mutex_unlock(&monitor);
  }

  LOG_DEBUG(LOG_TAG, "%s Callback thread exited", __func__);
}

static bool timer_create_internal(const clockid_t clock_id, timer_t *timer) {
  assert(timer != NULL);

  struct sigevent sigevent;
  memset(&sigevent, 0, sizeof(sigevent));
  sigevent.sigev_notify = SIGEV_THREAD;
  sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
  if (timer_create(clock_id, &sigevent, timer) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to create timer with clock %d: %s",
              __func__, clock_id, strerror(errno));
    if (clock_id == CLOCK_BOOTTIME_ALARM) {
      LOG_ERROR(LOG_TAG,
                "The kernel might not have support for timer_create(CLOCK_BOOTTIME_ALARM): https://lwn.net/Articles/429925/");
      LOG_ERROR(LOG_TAG,
                "See following patches: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/log/?qt=grep&q=CLOCK_BOOTTIME_ALARM");
    }
    return false;
  }

  return true;
}

static void update_scheduling_stats(alarm_stats_t *stats,
                                    period_ms_t now_ms,
                                    period_ms_t deadline_ms,
                                    period_ms_t execution_delta_ms)
{
  stats->total_updates++;
  stats->last_update_ms = now_ms;

  update_stat(&stats->callback_execution, execution_delta_ms);

  if (deadline_ms < now_ms) {
    // Overdue scheduling
    period_ms_t delta_ms = now_ms - deadline_ms;
    update_stat(&stats->overdue_scheduling, delta_ms);
  } else if (deadline_ms > now_ms) {
    // Premature scheduling
    period_ms_t delta_ms = deadline_ms - now_ms;
    update_stat(&stats->premature_scheduling, delta_ms);
  }
}
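
// Worked example (illustrative numbers only): if an alarm's deadline was at
// 5000 ms but its callback is dispatched at now_ms = 5007 ms, the alarm is
// recorded as overdue by 7 ms; had dispatch happened at 4998 ms instead, it
// would be recorded as premature by 2 ms. The execution delta (t1 - t0
// measured around the callback in alarm_queue_ready) feeds the separate
// callback_execution statistic.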
"PERIODIC" : "SINGLE"); 754 755 dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n", 756 " Action counts (sched/resched/exec/cancel)", 757 stats->scheduled_count, stats->rescheduled_count, 758 stats->callback_execution.count, stats->canceled_count); 759 760 dprintf(fd, "%-51s: %zu / %zu\n", 761 " Deviation counts (overdue/premature)", 762 stats->overdue_scheduling.count, 763 stats->premature_scheduling.count); 764 765 dprintf(fd, "%-51s: %llu / %llu / %lld\n", 766 " Time in ms (since creation/interval/remaining)", 767 (unsigned long long)(just_now - alarm->creation_time), 768 (unsigned long long) alarm->period, 769 (long long)(alarm->deadline - just_now)); 770 771 dump_stat(fd, &stats->callback_execution, 772 " Callback execution time in ms (total/max/avg)"); 773 774 dump_stat(fd, &stats->overdue_scheduling, 775 " Overdue scheduling time in ms (total/max/avg)"); 776 777 dump_stat(fd, &stats->premature_scheduling, 778 " Premature scheduling time in ms (total/max/avg)"); 779 780 dprintf(fd, "\n"); 781 } 782 pthread_mutex_unlock(&monitor); 783 } 784