/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "android/charpipe.h"
#include "android/log-rotate.h"
#include "android/snaphost-android.h"
#include "block/aio.h"
#include "exec/hax.h"
#include "hw/hw.h"
#include "monitor/monitor.h"
#include "net/net.h"
#include "qemu-common.h"
#include "qemu/sockets.h"
#include "qemu/timer.h"
#include "slirp-android/libslirp.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"

#ifdef __linux__
#include <sys/ioctl.h>
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

int qemu_calculate_timeout(void);

#ifndef CONFIG_ANDROID
/* Conversion factor from emulated instructions to virtual clock ticks. */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed.
 */
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
#endif // !CONFIG_ANDROID

#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;

    /* Drain the notify pipe */
    do {
        char buffer[512];
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len > 0);
}

static int qemu_main_loop_event_init(void)
{
    int err;
    int fds[2];

    err = pipe(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_main_loop_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        perror("Failed CreateEvent");
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}
#endif

int qemu_init_main_loop(void)
{
    return qemu_main_loop_event_init();
}

#ifndef _WIN32

static inline void os_host_main_loop_wait(int *timeout)
{
}

#else // _WIN32

/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;

int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    pe = g_malloc0(sizeof(PollingEntry));
    pe->func = func;
    pe->opaque = opaque;
    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
    *ppe = pe;
    return 0;
}

void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **ppe, *pe;
    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
        pe = *ppe;
        if (pe->func == func && pe->opaque == opaque) {
            *ppe = pe->next;
            g_free(pe);
            break;
        }
    }
}

/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};

int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;

    if (w->num >= MAXIMUM_WAIT_OBJECTS)
        return -1;
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->num++;
    return 0;
}

void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle)
            found = 1;
        if (found) {
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
        }
    }
    if (found)
        w->num--;
}
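
/* Illustrative usage sketch (not from this file): a Win32-only backend would
 * typically register an auto-reset event so that os_host_main_loop_wait()
 * below wakes up and dispatches its callback when the event is signaled.
 * The my_backend_* names are hypothetical placeholders.
 *
 *     static void my_backend_ready(void *opaque)
 *     {
 *         MyBackend *b = opaque;
 *         my_backend_process(b);   // hypothetical work function
 *     }
 *
 *     HANDLE ev = CreateEvent(NULL, FALSE, FALSE, NULL);
 *     qemu_add_wait_object(ev, my_backend_ready, backend);
 *     ...
 *     qemu_del_wait_object(ev, my_backend_ready, backend);
 *     CloseHandle(ev);
 *
 * Callbacks are invoked from os_host_main_loop_wait() after the iothread
 * mutex has been re-acquired.
 */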

void os_host_main_loop_wait(int *timeout)
{
    int ret, ret2, i;
    PollingEntry *pe;

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for(pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret == 0) {
        int err;
        WaitObjects *w = &wait_objects;

        qemu_mutex_unlock_iothread();
        ret = WaitForMultipleObjects(w->num, w->events, FALSE, *timeout);
        qemu_mutex_lock_iothread();
        if (WAIT_OBJECT_0 + 0 <= ret && ret <= WAIT_OBJECT_0 + w->num - 1) {
            if (w->func[ret - WAIT_OBJECT_0])
                w->func[ret - WAIT_OBJECT_0](w->opaque[ret - WAIT_OBJECT_0]);

            /* Check for additional signaled events */
            for(i = (ret - WAIT_OBJECT_0 + 1); i < w->num; i++) {

                /* Check if event is signaled */
                ret2 = WaitForSingleObject(w->events[i], 0);
                if(ret2 == WAIT_OBJECT_0) {
                    if (w->func[i])
                        w->func[i](w->opaque[i]);
                } else if (ret2 == WAIT_TIMEOUT) {
                } else {
                    err = GetLastError();
                    fprintf(stderr, "WaitForSingleObject error %d %d\n", i, err);
                }
            }
        } else if (ret == WAIT_TIMEOUT) {
        } else {
            err = GetLastError();
            fprintf(stderr, "WaitForMultipleObjects error %d %d\n", ret, err);
        }
    }

    *timeout = 0;
}

#endif // _WIN32

static void qemu_run_alarm_timer(void); // forward

void main_loop_wait(int timeout)
{
    fd_set rfds, wfds, xfds;
    int ret, nfds;
    struct timeval tv;

    qemu_bh_update_timeout(&timeout);

    os_host_main_loop_wait(&timeout);

    tv.tv_sec = timeout / 1000;
    tv.tv_usec = (timeout % 1000) * 1000;

    /* poll any events */

    /* XXX: separate device handlers from system ones */
    nfds = -1;
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds);
    if (slirp_is_inited()) {
        slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
    }

    qemu_mutex_unlock_iothread();
    ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
    qemu_mutex_lock_iothread();
    qemu_iohandler_poll(&rfds, &wfds, &xfds, ret);
    if (slirp_is_inited()) {
        if (ret < 0) {
            FD_ZERO(&rfds);
            FD_ZERO(&wfds);
            FD_ZERO(&xfds);
        }
        slirp_select_poll(&rfds, &wfds, &xfds);
    }
    charpipe_poll();

    qemu_clock_run_all_timers();

    qemu_run_alarm_timer();

    /* Check bottom-halves last in case any of the earlier events triggered
       them. */
    qemu_bh_poll();

}

void main_loop(void)
{
    int r;

#ifdef CONFIG_HAX
    if (hax_enabled())
        hax_sync_vcpus();
#endif

    for (;;) {
        do {
#ifdef CONFIG_PROFILER
            int64_t ti;
#endif
            tcg_cpu_exec();
#ifdef CONFIG_PROFILER
            ti = profile_getclock();
#endif
            main_loop_wait(qemu_calculate_timeout());
#ifdef CONFIG_PROFILER
            dev_time += profile_getclock() - ti;
#endif

            qemu_log_rotation_poll();

        } while (vm_can_run());

        if (qemu_debug_requested())
            vm_stop(EXCP_DEBUG);
        if (qemu_shutdown_requested()) {
            if (no_shutdown) {
                vm_stop(0);
                no_shutdown = 0;
            } else {
                if (savevm_on_exit != NULL) {
                    /* Prior to saving VM to the snapshot file, save HW config
                     * settings for that VM, so we can match them when VM gets
                     * loaded from the snapshot.
                     */
                    snaphost_save_config(savevm_on_exit);
                    do_savevm(cur_mon, savevm_on_exit);
                }
                break;
            }
        }
        if (qemu_reset_requested()) {
            pause_all_vcpus();
            qemu_system_reset();
            resume_all_vcpus();
        }
        if (qemu_powerdown_requested())
            qemu_system_powerdown();
        if ((r = qemu_vmstop_requested()))
            vm_stop(r);
    }
    pause_all_vcpus();
}

// TODO(digit): Re-enable icount handling in the future.
void configure_icount(const char* opts) {
}

struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
#if defined(__linux__)
    int fd;
    timer_t timer;
#elif defined(_WIN32)
    HANDLE timer;
#endif
    char expired;
};

static struct qemu_alarm_timer *alarm_timer;

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return t->rearm != NULL;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (t->rearm) {
        t->rearm(t);
    }
}

static void qemu_run_alarm_timer(void) {
    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }
}

/* TODO: MIN_TIMER_REARM_NS should be optimized */
#define MIN_TIMER_REARM_NS 250000

#ifdef _WIN32

static int mm_start_timer(struct qemu_alarm_timer *t);
static void mm_stop_timer(struct qemu_alarm_timer *t);
static void mm_rearm_timer(struct qemu_alarm_timer *t);

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */

int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
    {"unix", unix_start_timer, unix_stop_timer, NULL},
#ifdef __linux__
    /* On Linux, the 'dynticks' clock sometimes doesn't work
     * properly. This results in the UI freezing while emulation
     * continues, for several seconds... So move it to the end
     * of the list.
     */
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer},
#endif
#else
    {"mmtimer", mm_start_timer, mm_stop_timer, NULL},
    {"mmtimer2", mm_start_timer, mm_stop_timer, mm_rearm_timer},
    {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
    {"win32", win32_start_timer, win32_stop_timer, NULL},
#endif
    {NULL, }
};

static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}

void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = g_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    g_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}

// This variable is used to notify the qemu_timer_alarm_pending() caller
// (really tcg_cpu_exec()) that an alarm has expired. It is set in the
// timer callback, which can be a signal handler on non-Windows platforms.
static volatile sig_atomic_t timer_alarm_pending = 1;

int qemu_timer_alarm_pending(void)
{
    int ret = timer_alarm_pending;
    timer_alarm_pending = 0;
    return ret;
}

// Compute the next alarm deadline, return a timeout in nanoseconds.
// NOTE: This function cannot be called from a signal handler since
// it calls qemu-timer.c functions that acquire/release global mutexes.
static int64_t qemu_next_alarm_deadline(void)
{
    int64_t delta = INT32_MAX;
    if (!use_icount) {
        delta = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    }
    int64_t hdelta = qemu_clock_deadline_ns_all(QEMU_CLOCK_HOST);
    if (hdelta < delta) {
        delta = hdelta;
    }
    int64_t rtdelta = qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME);
    if (rtdelta < delta) {
        delta = rtdelta;
    }
    return delta;
}

#ifdef _WIN32
static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

    // It's not possible to call qemu_next_alarm_deadline() to know
    // if a timer has really expired, in the case of non-dynamic alarms,
    // so just signal and let the main loop thread do the checks instead.
    timer_alarm_pending = 1;

    // Ensure a dynamic alarm will be properly rescheduled.
    if (alarm_has_dynticks(t))
        t->expired = 1;

    // This forces a cpu_exit() call that will end the current CPU
    // execution ASAP.
    qemu_notify_event();
}

#if defined(__linux__)

static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->timer = host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = t->timer;

    timer_delete(host_timer);
}

static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = t->timer;
    struct itimerspec timeout;
    int64_t nearest_delta_ns = INT64_MAX;
    int64_t current_ns;

    assert(alarm_has_dynticks(t));
    if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
        !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
        !qemu_clock_has_timers(QEMU_CLOCK_HOST))
        return;

    nearest_delta_ns = qemu_next_alarm_deadline();
    if (nearest_delta_ns < MIN_TIMER_REARM_NS)
        nearest_delta_ns = MIN_TIMER_REARM_NS;

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
    if (current_ns && current_ns <= nearest_delta_ns)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_ns / 1000000000;
    timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}

#endif /* defined(__linux__) */

#if !defined(_WIN32)

static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */


#ifdef _WIN32

static MMRESULT mm_timer;
static unsigned mm_period;

static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
                                      DWORD_PTR dwUser, DWORD_PTR dw1,
                                      DWORD_PTR dw2)
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t) {
        return;
    }
    // We can actually call qemu_next_alarm_deadline() here since this
    // doesn't run in a signal handler, but a different thread.
    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
        t->expired = 1;
        timer_alarm_pending = 1;
        qemu_notify_event();
    }
}

static int mm_start_timer(struct qemu_alarm_timer *t)
{
    TIMECAPS tc;
    UINT flags;

    memset(&tc, 0, sizeof(tc));
    timeGetDevCaps(&tc, sizeof(tc));

    mm_period = tc.wPeriodMin;
    timeBeginPeriod(mm_period);

    flags = TIME_CALLBACK_FUNCTION;
    if (alarm_has_dynticks(t)) {
        flags |= TIME_ONESHOT;
    } else {
        flags |= TIME_PERIODIC;
    }

    mm_timer = timeSetEvent(1,                  /* interval (ms) */
                            mm_period,          /* resolution */
                            mm_alarm_handler,   /* function */
                            (DWORD_PTR)t,       /* parameter */
                            flags);

    if (!mm_timer) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        timeEndPeriod(mm_period);
        return -1;
    }

    return 0;
}

static void mm_stop_timer(struct qemu_alarm_timer *t)
{
    timeKillEvent(mm_timer);
    timeEndPeriod(mm_period);
}

static void mm_rearm_timer(struct qemu_alarm_timer *t)
{
    int nearest_delta_ms;

    assert(alarm_has_dynticks(t));
    if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
        !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
        !qemu_clock_has_timers(QEMU_CLOCK_HOST)) {
        return;
    }

    timeKillEvent(mm_timer);

    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
    if (nearest_delta_ms < 1) {
        nearest_delta_ms = 1;
    }
    mm_timer = timeSetEvent(nearest_delta_ms,
                            mm_period,
                            mm_alarm_handler,
                            (DWORD_PTR)t,
                            TIME_ONESHOT | TIME_CALLBACK_FUNCTION);

    if (!mm_timer) {
        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
                GetLastError());

        timeEndPeriod(mm_period);
        exit(1);
    }
}

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer;
    BOOLEAN success;

    /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
       is zero) that has already expired, the timer is not updated. Since
       creating a new timer is relatively expensive, set a bogus one-hour
       interval in the dynticks case. */
    success = CreateTimerQueueTimer(&hTimer,
                                    NULL,
                                    host_alarm_handler,
                                    t,
                                    1,
                                    alarm_has_dynticks(t) ?
                                        3600000 : 1,
                                    WT_EXECUTEINTIMERTHREAD);

    if (!success) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        return -1;
    }

    t->timer = hTimer;
    return 0;
}

static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer = t->timer;

    if (hTimer) {
        DeleteTimerQueueTimer(NULL, hTimer, NULL);
    }
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    HANDLE hTimer = t->timer;
    int nearest_delta_ms;
    BOOLEAN success;

    assert(alarm_has_dynticks(t));
    if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
        !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
        !qemu_clock_has_timers(QEMU_CLOCK_HOST)) {
        return;
    }
    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
    if (nearest_delta_ms < 1) {
        nearest_delta_ms = 1;
    }
    success = ChangeTimerQueueTimer(NULL,
                                    hTimer,
                                    nearest_delta_ms,
                                    3600000);

    if (!success) {
        fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
                GetLastError());
        exit(-1);
    }
}

#endif /* _WIN32 */

static void alarm_timer_on_change_state_rearm(void *opaque,
                                              int running,
                                              int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}

int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    alarm_timer = t;
    timer_alarm_pending = 1;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}

void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}

int qemu_calculate_timeout(void)
{
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else if (tcg_has_work())
        timeout = 0;
    else {
#ifdef WIN32
        /* This corresponds to the case where the emulated system is
         * totally idle and waiting for i/o. The problem is that on
         * Windows, the default value will prevent Windows user events
         * from being delivered in less than 5 seconds.
         *
         * Upstream contains a different way to handle this; for now
         * this hack should be sufficient until we integrate it into
         * our tree.
         */
        timeout = 1000/15; /* deliver user events every 1/15th of a second */
#else
        timeout = 5000;
#endif
        int64_t timeout_ns = (int64_t)timeout * 1000000LL;
        timeout_ns = qemu_soonest_timeout(
                timeout_ns, timerlistgroup_deadline_ns(&main_loop_tlg));
        timeout = (int)((timeout_ns + 999999LL) / 1000000LL);
    }

    return timeout;
}
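
/* Illustrative wiring sketch (assumption: the real call sites live in the
 * emulator's startup code, outside this file). The pieces defined here are
 * typically used roughly as follows:
 *
 *     configure_alarms("dynticks");    // optional: reorder alarm_timers[]
 *     if (qemu_init_main_loop() != 0)  // create the io-thread notify pipe/event
 *         exit(1);
 *     if (init_timer_alarm() != 0)     // pick the first alarm backend that starts
 *         exit(1);
 *     ...
 *     main_loop();                     // runs until a shutdown request breaks out
 *     quit_timers();                   // stop the active alarm backend
 */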