/******************************************************************************
 *
 *  Copyright 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#include "internal_include/bt_target.h"

#define LOG_TAG "bt_osi_alarm"

#include "osi/include/alarm.h"

#include <base/cancelable_callback.h>
#include <base/logging.h>
#include <base/message_loop/message_loop.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include <hardware/bluetooth.h>

#include <mutex>

#include "osi/include/allocator.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/list.h"
#include "osi/include/log.h"
#include "osi/include/osi.h"
#include "osi/include/semaphore.h"
#include "osi/include/thread.h"
#include "osi/include/wakelock.h"

using base::Bind;
using base::CancelableClosure;
using base::MessageLoop;

extern base::MessageLoop* get_message_loop();

// Callback and timer threads should run at RT priority in order to ensure
// they meet audio deadlines. Use this priority for all audio/timer related
// threads.
static const int THREAD_RT_PRIORITY = 1;

typedef struct {
  size_t count;
  period_ms_t total_ms;
  period_ms_t max_ms;
} stat_t;

// Alarm-related information and statistics
typedef struct {
  const char* name;
  size_t scheduled_count;
  size_t canceled_count;
  size_t rescheduled_count;
  size_t total_updates;
  period_ms_t last_update_ms;
  stat_t overdue_scheduling;
  stat_t premature_scheduling;
} alarm_stats_t;

/* Wrapper around CancelableClosure that lets it be embedded in structs,
 * without the need to define a copy operator. */
struct CancelableClosureInStruct {
  base::CancelableClosure i;

  CancelableClosureInStruct& operator=(const CancelableClosureInStruct& in) {
    if (!in.i.callback().is_null()) i.Reset(in.i.callback());
    return *this;
  }
};

struct alarm_t {
  // The mutex is held while the callback for this alarm is being executed.
  // It allows us to release the coarse-grained monitor lock while a
  // potentially long-running callback is executing. |alarm_cancel| uses this
  // mutex to provide a guarantee to its caller that the callback will not be
  // in progress when it returns.
  std::recursive_mutex* callback_mutex;
  period_ms_t creation_time;
  period_ms_t period;
  period_ms_t deadline;
  period_ms_t prev_deadline;  // Previous deadline - used for accounting of
                              // periodic timers
  bool is_periodic;
  fixed_queue_t* queue;  // The processing queue to add this alarm to
  alarm_callback_t callback;
  void* data;
  alarm_stats_t stats;

  bool for_msg_loop;  // True, if the alarm should be processed on message loop
  CancelableClosureInStruct closure;  // posted to message loop for processing
};

// If the next wakeup time is less than this threshold, we should acquire
// a wakelock instead of setting a wake alarm so we're not bouncing in
// and out of suspend frequently. This value is externally visible to allow
// unit tests to run faster. It should not be modified by production code.
int64_t TIMER_INTERVAL_FOR_WAKELOCK_IN_MS = 3000;
static const clockid_t CLOCK_ID = CLOCK_BOOTTIME;

#if (KERNEL_MISSING_CLOCK_BOOTTIME_ALARM == TRUE)
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME;
#else
static const clockid_t CLOCK_ID_ALARM = CLOCK_BOOTTIME_ALARM;
#endif

// This mutex ensures that the |alarm_set|, |alarm_cancel|, and alarm callback
// functions execute serially and not concurrently. As a result, this mutex
// also protects the |alarms| list.
static std::mutex alarms_mutex;
static list_t* alarms;
static timer_t timer;
static timer_t wakeup_timer;
static bool timer_set;

// All alarm callbacks are dispatched from |dispatcher_thread|
static thread_t* dispatcher_thread;
static bool dispatcher_thread_active;
static semaphore_t* alarm_expired;

// Default alarm callback thread and queue
static thread_t* default_callback_thread;
static fixed_queue_t* default_callback_queue;

static alarm_t* alarm_new_internal(const char* name, bool is_periodic);
static bool lazy_initialize(void);
static period_ms_t now(void);
static void alarm_set_internal(alarm_t* alarm, period_ms_t period,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop);
static void alarm_cancel_internal(alarm_t* alarm);
static void remove_pending_alarm(alarm_t* alarm);
static void schedule_next_instance(alarm_t* alarm);
static void reschedule_root_alarm(void);
static void alarm_queue_ready(fixed_queue_t* queue, void* context);
static void timer_callback(void* data);
static void callback_dispatch(void* context);
static bool timer_create_internal(const clockid_t clock_id, timer_t* timer);
static void update_scheduling_stats(alarm_stats_t* stats, period_ms_t now_ms,
                                    period_ms_t deadline_ms);
// Registers |queue| for processing alarm callbacks on |thread|.
// |queue| may not be NULL. |thread| may not be NULL.
static void alarm_register_processing_queue(fixed_queue_t* queue,
                                            thread_t* thread);

static void update_stat(stat_t* stat, period_ms_t delta) {
  if (stat->max_ms < delta) stat->max_ms = delta;
  stat->total_ms += delta;
  stat->count++;
}

alarm_t* alarm_new(const char* name) { return alarm_new_internal(name, false); }

alarm_t* alarm_new_periodic(const char* name) {
  return alarm_new_internal(name, true);
}

static alarm_t* alarm_new_internal(const char* name, bool is_periodic) {
  // Make sure we have a list we can insert alarms into.
  if (!alarms && !lazy_initialize()) {
    CHECK(false);  // if initialization failed, we should not continue
    return NULL;
  }

  alarm_t* ret = static_cast<alarm_t*>(osi_calloc(sizeof(alarm_t)));

  ret->callback_mutex = new std::recursive_mutex;
  ret->is_periodic = is_periodic;
  ret->stats.name = osi_strdup(name);

  ret->for_msg_loop = false;
  // placement new
  new (&ret->closure) CancelableClosureInStruct();

  // NOTE: The stats were reset by osi_calloc() above

  return ret;
}

void alarm_free(alarm_t* alarm) {
  if (!alarm) return;

  alarm_cancel(alarm);
  delete alarm->callback_mutex;
  osi_free((void*)alarm->stats.name);
  alarm->closure.~CancelableClosureInStruct();
  osi_free(alarm);
}

period_ms_t alarm_get_remaining_ms(const alarm_t* alarm) {
  CHECK(alarm != NULL);
  period_ms_t remaining_ms = 0;
  period_ms_t just_now = now();

  std::lock_guard<std::mutex> lock(alarms_mutex);
  if (alarm->deadline > just_now) remaining_ms = alarm->deadline - just_now;

  return remaining_ms;
}

void alarm_set(alarm_t* alarm, period_ms_t interval_ms, alarm_callback_t cb,
               void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, default_callback_queue,
                     false);
}

void alarm_set_on_mloop(alarm_t* alarm, period_ms_t interval_ms,
                        alarm_callback_t cb, void* data) {
  alarm_set_internal(alarm, interval_ms, cb, data, NULL, true);
}

// Runs in exclusion with alarm_cancel and timer_callback.
static void alarm_set_internal(alarm_t* alarm, period_ms_t period,
                               alarm_callback_t cb, void* data,
                               fixed_queue_t* queue, bool for_msg_loop) {
  CHECK(alarms != NULL);
  CHECK(alarm != NULL);
  CHECK(cb != NULL);

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarm->creation_time = now();
  alarm->period = period;
  alarm->queue = queue;
  alarm->callback = cb;
  alarm->data = data;
  alarm->for_msg_loop = for_msg_loop;

  schedule_next_instance(alarm);
  alarm->stats.scheduled_count++;
}

void alarm_cancel(alarm_t* alarm) {
  CHECK(alarms != NULL);
  if (!alarm) return;

  {
    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_cancel_internal(alarm);
  }

  // If the callback for |alarm| is in progress, wait here until it completes.
  std::lock_guard<std::recursive_mutex> lock(*alarm->callback_mutex);
}

// Internal implementation of canceling an alarm.
// The caller must hold the |alarms_mutex|
static void alarm_cancel_internal(alarm_t* alarm) {
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);

  remove_pending_alarm(alarm);

  alarm->deadline = 0;
  alarm->prev_deadline = 0;
  alarm->callback = NULL;
  alarm->data = NULL;
  alarm->stats.canceled_count++;
  alarm->queue = NULL;

  if (needs_reschedule) reschedule_root_alarm();
}

bool alarm_is_scheduled(const alarm_t* alarm) {
  if ((alarms == NULL) || (alarm == NULL)) return false;
  return (alarm->callback != NULL);
}

void alarm_cleanup(void) {
  // If lazy_initialize never ran there is nothing else to do
  if (!alarms) return;

  dispatcher_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  timer_delete(wakeup_timer);
  timer_delete(timer);
  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  list_free(alarms);
  alarms = NULL;
}

static bool lazy_initialize(void) {
  CHECK(alarms == NULL);

  // timer_t doesn't have an invalid value so we must track whether
  // the |timer| variable is valid ourselves.
  bool timer_initialized = false;
  bool wakeup_timer_initialized = false;

  std::lock_guard<std::mutex> lock(alarms_mutex);

  alarms = list_new(NULL);
  if (!alarms) {
    LOG_ERROR(LOG_TAG, "%s unable to allocate alarm list.", __func__);
    goto error;
  }

  if (!timer_create_internal(CLOCK_ID, &timer)) goto error;
  timer_initialized = true;

  if (!timer_create_internal(CLOCK_ID_ALARM, &wakeup_timer)) goto error;
  wakeup_timer_initialized = true;

  alarm_expired = semaphore_new(0);
  if (!alarm_expired) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm expired semaphore", __func__);
    goto error;
  }

  default_callback_thread =
      thread_new_sized("alarm_default_callbacks", SIZE_MAX);
  if (default_callback_thread == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks thread.",
              __func__);
    goto error;
  }
  thread_set_rt_priority(default_callback_thread, THREAD_RT_PRIORITY);
  default_callback_queue = fixed_queue_new(SIZE_MAX);
  if (default_callback_queue == NULL) {
    LOG_ERROR(LOG_TAG, "%s unable to create default alarm callbacks queue.",
              __func__);
    goto error;
  }
  alarm_register_processing_queue(default_callback_queue,
                                  default_callback_thread);

  dispatcher_thread_active = true;
  dispatcher_thread = thread_new("alarm_dispatcher");
  if (!dispatcher_thread) {
    LOG_ERROR(LOG_TAG, "%s unable to create alarm callback thread.", __func__);
    goto error;
  }
  thread_set_rt_priority(dispatcher_thread, THREAD_RT_PRIORITY);
  thread_post(dispatcher_thread, callback_dispatch, NULL);
  return true;

error:
  fixed_queue_free(default_callback_queue, NULL);
  default_callback_queue = NULL;
  thread_free(default_callback_thread);
  default_callback_thread = NULL;

  thread_free(dispatcher_thread);
  dispatcher_thread = NULL;

  dispatcher_thread_active = false;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;

  if (wakeup_timer_initialized) timer_delete(wakeup_timer);

  if (timer_initialized) timer_delete(timer);

  list_free(alarms);
  alarms = NULL;

  return false;
}

static period_ms_t now(void) {
  CHECK(alarms != NULL);

  struct timespec ts;
  if (clock_gettime(CLOCK_ID, &ts) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to get current time: %s", __func__,
              strerror(errno));
    return 0;
  }

  return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}

// Remove alarm from internal alarm list and the processing queue
// The caller must hold the |alarms_mutex|
static void remove_pending_alarm(alarm_t* alarm) {
  list_remove(alarms, alarm);

  if (alarm->for_msg_loop) {
    alarm->closure.i.Cancel();
  } else {
    while (fixed_queue_try_remove_from_queue(alarm->queue, alarm) != NULL) {
      // Remove all repeated alarm instances from the queue.
      // NOTE: We are defensive here - we shouldn't have repeated alarm
      // instances
    }
  }
}

// Must be called with |alarms_mutex| held
static void schedule_next_instance(alarm_t* alarm) {
  // If the alarm is currently set and it's at the start of the list,
  // we'll need to re-schedule since we've adjusted the earliest deadline.
  bool needs_reschedule =
      (!list_is_empty(alarms) && list_front(alarms) == alarm);
  if (alarm->callback) remove_pending_alarm(alarm);

  // Calculate the next deadline for this alarm
  period_ms_t just_now = now();
  period_ms_t ms_into_period = 0;
  if ((alarm->is_periodic) && (alarm->period != 0))
    ms_into_period = ((just_now - alarm->creation_time) % alarm->period);
  alarm->deadline = just_now + (alarm->period - ms_into_period);

  // Add it into the timer list sorted by deadline (earliest deadline first).
  if (list_is_empty(alarms) ||
      ((alarm_t*)list_front(alarms))->deadline > alarm->deadline) {
    list_prepend(alarms, alarm);
  } else {
    for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
         node = list_next(node)) {
      list_node_t* next = list_next(node);
      if (next == list_end(alarms) ||
          ((alarm_t*)list_node(next))->deadline > alarm->deadline) {
        list_insert_after(alarms, node, alarm);
        break;
      }
    }
  }

  // If the new alarm has the earliest deadline, we need to re-evaluate our
  // schedule.
  if (needs_reschedule ||
      (!list_is_empty(alarms) && list_front(alarms) == alarm)) {
    reschedule_root_alarm();
  }
}

// NOTE: must be called with |alarms_mutex| held
static void reschedule_root_alarm(void) {
  CHECK(alarms != NULL);

  const bool timer_was_set = timer_set;
  alarm_t* next;
  int64_t next_expiration;

  // If used in a zeroed state, disarms the timer.
  struct itimerspec timer_time;
  memset(&timer_time, 0, sizeof(timer_time));

  if (list_is_empty(alarms)) goto done;

  next = static_cast<alarm_t*>(list_front(alarms));
  next_expiration = next->deadline - now();
  if (next_expiration < TIMER_INTERVAL_FOR_WAKELOCK_IN_MS) {
    if (!timer_set) {
      if (!wakelock_acquire()) {
        LOG_ERROR(LOG_TAG, "%s unable to acquire wake lock", __func__);
        goto done;
      }
    }

    timer_time.it_value.tv_sec = (next->deadline / 1000);
    timer_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;

    // It is entirely unsafe to call timer_settime(2) with a zeroed timerspec
    // for timers with *_ALARM clock IDs. Although the man page states that the
    // timer would be canceled, the current behavior (as of Linux kernel 3.17)
    // is that the callback is issued immediately. The only way to cancel an
    // *_ALARM timer is to delete the timer. But unfortunately, deleting and
    // re-creating a timer is rather expensive; every timer_create(2) spawns a
    // new thread. So we simply set the timer to fire at the largest possible
    // time.
    //
    // If we've reached this code path, we're going to grab a wake lock and
    // wait for the next timer to fire. In that case, there's no reason to
    // have a pending wakeup timer so we simply cancel it.
    struct itimerspec end_of_time;
    memset(&end_of_time, 0, sizeof(end_of_time));
    end_of_time.it_value.tv_sec = (time_t)(1LL << (sizeof(time_t) * 8 - 2));
    timer_settime(wakeup_timer, TIMER_ABSTIME, &end_of_time, NULL);
  } else {
    // WARNING: do not attempt to use relative timers with *_ALARM clock IDs
    // in kernels before 3.17 unless you have the following patch:
    // https://lkml.org/lkml/2014/7/7/576
    struct itimerspec wakeup_time;
    memset(&wakeup_time, 0, sizeof(wakeup_time));

    wakeup_time.it_value.tv_sec = (next->deadline / 1000);
    wakeup_time.it_value.tv_nsec = (next->deadline % 1000) * 1000000LL;
    if (timer_settime(wakeup_timer, TIMER_ABSTIME, &wakeup_time, NULL) == -1)
      LOG_ERROR(LOG_TAG, "%s unable to set wakeup timer: %s", __func__,
                strerror(errno));
  }

done:
  timer_set =
      timer_time.it_value.tv_sec != 0 || timer_time.it_value.tv_nsec != 0;
  if (timer_was_set && !timer_set) {
    wakelock_release();
  }

  if (timer_settime(timer, TIMER_ABSTIME, &timer_time, NULL) == -1)
    LOG_ERROR(LOG_TAG, "%s unable to set timer: %s", __func__, strerror(errno));

  // If next expiration was in the past (e.g. short timer that got context
  // switched) then the timer might have disarmed itself. Detect this case and
  // work around it by manually signalling the |alarm_expired| semaphore.
  //
  // It is possible that the timer was actually super short (a few
  // milliseconds) and the timer expired normally before we called
  // |timer_gettime|. Worst case, |alarm_expired| is signaled twice for that
  // alarm. Nothing bad should happen in that case though since the callback
  // dispatch function checks to make sure the timer at the head of the list
  // actually expired.
  if (timer_set) {
    struct itimerspec time_to_expire;
    timer_gettime(timer, &time_to_expire);
    if (time_to_expire.it_value.tv_sec == 0 &&
        time_to_expire.it_value.tv_nsec == 0) {
      LOG_DEBUG(
          LOG_TAG,
          "%s alarm expiration too close for posix timers, switching to guns",
          __func__);
      semaphore_post(alarm_expired);
    }
  }
}

static void alarm_register_processing_queue(fixed_queue_t* queue,
                                            thread_t* thread) {
  CHECK(queue != NULL);
  CHECK(thread != NULL);

  fixed_queue_register_dequeue(queue, thread_get_reactor(thread),
                               alarm_queue_ready, NULL);
}

static void alarm_ready_generic(alarm_t* alarm,
                                std::unique_lock<std::mutex>& lock) {
  if (alarm == NULL) {
    return;  // The alarm was probably canceled
  }
  //
  // If the alarm is not periodic, we've fully serviced it now, and can reset
  // some of its internal state. This is useful to distinguish between expired
  // alarms and active ones.
  //
  alarm_callback_t callback = alarm->callback;
  void* data = alarm->data;
  period_ms_t deadline = alarm->deadline;
  if (alarm->is_periodic) {
    // The periodic alarm has been rescheduled and alarm->deadline has been
    // updated, hence we need to use the previous deadline.
    deadline = alarm->prev_deadline;
  } else {
    alarm->deadline = 0;
    alarm->callback = NULL;
    alarm->data = NULL;
    alarm->queue = NULL;
  }

  std::lock_guard<std::recursive_mutex> cb_lock(*alarm->callback_mutex);
  lock.unlock();

  // Update the statistics
  update_scheduling_stats(&alarm->stats, now(), deadline);

  // NOTE: Do NOT access "alarm" after the callback, as a safety precaution
  // in case the callback itself deleted the alarm.
  callback(data);
}

static void alarm_ready_mloop(alarm_t* alarm) {
  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_ready_generic(alarm, lock);
}

static void alarm_queue_ready(fixed_queue_t* queue, UNUSED_ATTR void* context) {
  CHECK(queue != NULL);

  std::unique_lock<std::mutex> lock(alarms_mutex);
  alarm_t* alarm = (alarm_t*)fixed_queue_try_dequeue(queue);
  alarm_ready_generic(alarm, lock);
}

// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void* ptr) {
  semaphore_post(alarm_expired);
}

// Function running on |dispatcher_thread| that performs the following:
//   (1) Receives a signal using |alarm_expired| that the alarm has expired
//   (2) Dispatches the alarm callback for processing by the corresponding
//       thread for that alarm.
static void callback_dispatch(UNUSED_ATTR void* context) {
  while (true) {
    semaphore_wait(alarm_expired);
    if (!dispatcher_thread_active) break;

    std::lock_guard<std::mutex> lock(alarms_mutex);
    alarm_t* alarm;

    // Take into account that the alarm may get cancelled before we get to it.
    // We're done here if there are no alarms or the alarm at the front is in
    // the future. Exit right away since there's nothing left to do.
    if (list_is_empty(alarms) ||
        (alarm = static_cast<alarm_t*>(list_front(alarms)))->deadline > now()) {
      reschedule_root_alarm();
      continue;
    }

    list_remove(alarms, alarm);

    if (alarm->is_periodic) {
      alarm->prev_deadline = alarm->deadline;
      schedule_next_instance(alarm);
      alarm->stats.rescheduled_count++;
    }
    reschedule_root_alarm();

    // Enqueue the alarm for processing
    if (alarm->for_msg_loop) {
      if (!get_message_loop()) {
        LOG_ERROR(LOG_TAG, "%s: message loop already NULL. Alarm: %s", __func__,
                  alarm->stats.name);
        continue;
      }

      alarm->closure.i.Reset(Bind(alarm_ready_mloop, alarm));
      get_message_loop()->task_runner()->PostTask(FROM_HERE,
                                                  alarm->closure.i.callback());
    } else {
      fixed_queue_enqueue(alarm->queue, alarm);
    }
  }

  LOG_DEBUG(LOG_TAG, "%s Callback thread exited", __func__);
}

static bool timer_create_internal(const clockid_t clock_id, timer_t* timer) {
  CHECK(timer != NULL);

  struct sigevent sigevent;
  // create timer with RT priority thread
  pthread_attr_t thread_attr;
  pthread_attr_init(&thread_attr);
  pthread_attr_setschedpolicy(&thread_attr, SCHED_FIFO);
  struct sched_param param;
  param.sched_priority = THREAD_RT_PRIORITY;
  pthread_attr_setschedparam(&thread_attr, &param);

  memset(&sigevent, 0, sizeof(sigevent));
  sigevent.sigev_notify = SIGEV_THREAD;
  sigevent.sigev_notify_function = (void (*)(union sigval))timer_callback;
  sigevent.sigev_notify_attributes = &thread_attr;
  if (timer_create(clock_id, &sigevent, timer) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to create timer with clock %d: %s", __func__,
              clock_id, strerror(errno));
    if (clock_id == CLOCK_BOOTTIME_ALARM) {
      LOG_ERROR(LOG_TAG,
                "The kernel might not have support for "
                "timer_create(CLOCK_BOOTTIME_ALARM): "
                "https://lwn.net/Articles/429925/");
      LOG_ERROR(LOG_TAG,
                "See following patches: "
                "https://git.kernel.org/cgit/linux/kernel/git/torvalds/"
                "linux.git/log/?qt=grep&q=CLOCK_BOOTTIME_ALARM");
    }
    return false;
  }

  return true;
}

static void update_scheduling_stats(alarm_stats_t* stats, period_ms_t now_ms,
                                    period_ms_t deadline_ms) {
  stats->total_updates++;
  stats->last_update_ms = now_ms;

  if (deadline_ms < now_ms) {
    // Overdue scheduling
    period_ms_t delta_ms = now_ms - deadline_ms;
    update_stat(&stats->overdue_scheduling, delta_ms);
  } else if (deadline_ms > now_ms) {
    // Premature scheduling
    period_ms_t delta_ms = deadline_ms - now_ms;
    update_stat(&stats->premature_scheduling, delta_ms);
  }
}

static void dump_stat(int fd, stat_t* stat, const char* description) {
  period_ms_t average_time_ms = 0;
  if (stat->count != 0) average_time_ms = stat->total_ms / stat->count;

  dprintf(fd, "%-51s: %llu / %llu / %llu\n", description,
          (unsigned long long)stat->total_ms, (unsigned long long)stat->max_ms,
          (unsigned long long)average_time_ms);
}

void alarm_debug_dump(int fd) {
  dprintf(fd, "\nBluetooth Alarms Statistics:\n");

  std::lock_guard<std::mutex> lock(alarms_mutex);

  if (alarms == NULL) {
    dprintf(fd, "  None\n");
    return;
  }

  period_ms_t just_now = now();

  dprintf(fd, "  Total Alarms: %zu\n\n", list_length(alarms));

  // Dump info for each alarm
  for (list_node_t* node = list_begin(alarms); node != list_end(alarms);
       node = list_next(node)) {
    alarm_t* alarm = (alarm_t*)list_node(node);
    alarm_stats_t* stats = &alarm->stats;

    dprintf(fd, "  Alarm : %s (%s)\n", stats->name,
            (alarm->is_periodic) ? "PERIODIC" : "SINGLE");
"PERIODIC" : "SINGLE"); 742 743 dprintf(fd, "%-51s: %zu / %zu / %zu / %zu\n", 744 " Action counts (sched/resched/exec/cancel)", 745 stats->scheduled_count, stats->rescheduled_count, 746 stats->total_updates, stats->canceled_count); 747 748 dprintf(fd, "%-51s: %zu / %zu\n", 749 " Deviation counts (overdue/premature)", 750 stats->overdue_scheduling.count, stats->premature_scheduling.count); 751 752 dprintf(fd, "%-51s: %llu / %llu / %lld\n", 753 " Time in ms (since creation/interval/remaining)", 754 (unsigned long long)(just_now - alarm->creation_time), 755 (unsigned long long)alarm->period, 756 (long long)(alarm->deadline - just_now)); 757 758 dump_stat(fd, &stats->overdue_scheduling, 759 " Overdue scheduling time in ms (total/max/avg)"); 760 761 dump_stat(fd, &stats->premature_scheduling, 762 " Premature scheduling time in ms (total/max/avg)"); 763 764 dprintf(fd, "\n"); 765 } 766 } 767