1 /* 2 * defines common to all virtual CPUs 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA 19 */ 20 #ifndef CPU_ALL_H 21 #define CPU_ALL_H 22 23 #include "qemu-common.h" 24 #include "cpu-common.h" 25 26 /* some important defines: 27 * 28 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned 29 * memory accesses. 30 * 31 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and 32 * otherwise little endian. 33 * 34 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) 35 * 36 * TARGET_WORDS_BIGENDIAN : same for target cpu 37 */ 38 39 #include "softfloat.h" 40 41 #if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 42 #define BSWAP_NEEDED 43 #endif 44 45 #ifdef BSWAP_NEEDED 46 47 static inline uint16_t tswap16(uint16_t s) 48 { 49 return bswap16(s); 50 } 51 52 static inline uint32_t tswap32(uint32_t s) 53 { 54 return bswap32(s); 55 } 56 57 static inline uint64_t tswap64(uint64_t s) 58 { 59 return bswap64(s); 60 } 61 62 static inline void tswap16s(uint16_t *s) 63 { 64 *s = bswap16(*s); 65 } 66 67 static inline void tswap32s(uint32_t *s) 68 { 69 *s = bswap32(*s); 70 } 71 72 static inline void tswap64s(uint64_t *s) 73 { 74 *s = bswap64(*s); 75 } 76 77 #else 78 79 static inline uint16_t tswap16(uint16_t s) 80 { 81 return s; 82 } 83 84 static inline uint32_t tswap32(uint32_t s) 85 { 86 return s; 87 } 88 89 static inline uint64_t tswap64(uint64_t s) 90 { 91 return s; 92 } 93 94 static inline void tswap16s(uint16_t *s) 95 { 96 } 97 98 static inline void tswap32s(uint32_t *s) 99 { 100 } 101 102 static inline void tswap64s(uint64_t *s) 103 { 104 } 105 106 #endif 107 108 #if TARGET_LONG_SIZE == 4 109 #define tswapl(s) tswap32(s) 110 #define tswapls(s) tswap32s((uint32_t *)(s)) 111 #define bswaptls(s) bswap32s(s) 112 #else 113 #define tswapl(s) tswap64(s) 114 #define tswapls(s) tswap64s((uint64_t *)(s)) 115 #define bswaptls(s) bswap64s(s) 116 #endif 117 118 typedef union { 119 float32 f; 120 uint32_t l; 121 } CPU_FloatU; 122 123 /* NOTE: arm FPA is horrible as double 32 bit words are stored in big 124 endian ! */ 125 typedef union { 126 float64 d; 127 #if defined(WORDS_BIGENDIAN) \ 128 || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) 129 struct { 130 uint32_t upper; 131 uint32_t lower; 132 } l; 133 #else 134 struct { 135 uint32_t lower; 136 uint32_t upper; 137 } l; 138 #endif 139 uint64_t ll; 140 } CPU_DoubleU; 141 142 #ifdef TARGET_SPARC 143 typedef union { 144 float128 q; 145 #if defined(WORDS_BIGENDIAN) \ 146 || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT)) 147 struct { 148 uint32_t upmost; 149 uint32_t upper; 150 uint32_t lower; 151 uint32_t lowest; 152 } l; 153 struct { 154 uint64_t upper; 155 uint64_t lower; 156 } ll; 157 #else 158 struct { 159 uint32_t lowest; 160 uint32_t lower; 161 uint32_t upper; 162 uint32_t upmost; 163 } l; 164 struct { 165 uint64_t lower; 166 uint64_t upper; 167 } ll; 168 #endif 169 } CPU_QuadU; 170 #endif 171 172 /* CPU memory access without any memory or io remapping */ 173 174 /* 175 * the generic syntax for the memory accesses is: 176 * 177 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr) 178 * 179 * store: st{type}{size}{endian}_{access_type}(ptr, val) 180 * 181 * type is: 182 * (empty): integer access 183 * f : float access 184 * 185 * sign is: 186 * (empty): for floats or 32 bit size 187 * u : unsigned 188 * s : signed 189 * 190 * size is: 191 * b: 8 bits 192 * w: 16 bits 193 * l: 32 bits 194 * q: 64 bits 195 * 196 * endian is: 197 * (empty): target cpu endianness or 8 bit access 198 * r : reversed target cpu endianness (not implemented yet) 199 * be : big endian (not implemented yet) 200 * le : little endian (not implemented yet) 201 * 202 * access_type is: 203 * raw : host memory access 204 * user : user mode access using soft MMU 205 * kernel : kernel mode access using soft MMU 206 */ 207 static inline int ldub_p(const void *ptr) 208 { 209 return *(uint8_t *)ptr; 210 } 211 212 static inline int ldsb_p(const void *ptr) 213 { 214 return *(int8_t *)ptr; 215 } 216 217 static inline void stb_p(void *ptr, int v) 218 { 219 *(uint8_t *)ptr = v; 220 } 221 222 /* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the 223 kernel handles unaligned load/stores may give better results, but 224 it is a system wide setting : bad */ 225 #if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) 226 227 /* conservative code for little endian unaligned accesses */ 228 static inline int lduw_le_p(const void *ptr) 229 { 230 #ifdef _ARCH_PPC 231 int val; 232 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); 233 return val; 234 #else 235 const uint8_t *p = ptr; 236 return p[0] | (p[1] << 8); 237 #endif 238 } 239 240 static inline int ldsw_le_p(const void *ptr) 241 { 242 #ifdef _ARCH_PPC 243 int val; 244 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr)); 245 return (int16_t)val; 246 #else 247 const uint8_t *p = ptr; 248 return (int16_t)(p[0] | (p[1] << 8)); 249 #endif 250 } 251 252 static inline int ldl_le_p(const void *ptr) 253 { 254 #ifdef _ARCH_PPC 255 int val; 256 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr)); 257 return val; 258 #else 259 const uint8_t *p = ptr; 260 return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); 261 #endif 262 } 263 264 static inline uint64_t ldq_le_p(const void *ptr) 265 { 266 const uint8_t *p = ptr; 267 uint32_t v1, v2; 268 v1 = ldl_le_p(p); 269 v2 = ldl_le_p(p + 4); 270 return v1 | ((uint64_t)v2 << 32); 271 } 272 273 static inline void stw_le_p(void *ptr, int v) 274 { 275 #ifdef _ARCH_PPC 276 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr)); 277 #else 278 uint8_t *p = ptr; 279 p[0] = v; 280 p[1] = v >> 8; 281 #endif 282 } 283 284 static inline void stl_le_p(void *ptr, int v) 285 { 286 #ifdef _ARCH_PPC 287 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr)); 288 #else 289 uint8_t *p = ptr; 290 p[0] = v; 291 p[1] = v >> 8; 292 p[2] = v >> 16; 293 p[3] = v >> 24; 294 #endif 295 } 296 297 static inline void stq_le_p(void *ptr, uint64_t v) 298 { 299 uint8_t *p = ptr; 300 stl_le_p(p, (uint32_t)v); 301 stl_le_p(p + 4, v >> 32); 302 } 303 304 /* float access */ 305 306 static inline float32 ldfl_le_p(const void *ptr) 307 { 308 union { 309 float32 f; 310 uint32_t i; 311 } u; 312 u.i = ldl_le_p(ptr); 313 return u.f; 314 } 315 316 static inline void stfl_le_p(void *ptr, float32 v) 317 { 318 union { 319 float32 f; 320 uint32_t i; 321 } u; 322 u.f = v; 323 stl_le_p(ptr, u.i); 324 } 325 326 static inline float64 ldfq_le_p(const void *ptr) 327 { 328 CPU_DoubleU u; 329 u.l.lower = ldl_le_p(ptr); 330 u.l.upper = ldl_le_p(ptr + 4); 331 return u.d; 332 } 333 334 static inline void stfq_le_p(void *ptr, float64 v) 335 { 336 CPU_DoubleU u; 337 u.d = v; 338 stl_le_p(ptr, u.l.lower); 339 stl_le_p(ptr + 4, u.l.upper); 340 } 341 342 #else 343 344 static inline int lduw_le_p(const void *ptr) 345 { 346 return *(uint16_t *)ptr; 347 } 348 349 static inline int ldsw_le_p(const void *ptr) 350 { 351 return *(int16_t *)ptr; 352 } 353 354 static inline int ldl_le_p(const void *ptr) 355 { 356 return *(uint32_t *)ptr; 357 } 358 359 static inline uint64_t ldq_le_p(const void *ptr) 360 { 361 return *(uint64_t *)ptr; 362 } 363 364 static inline void stw_le_p(void *ptr, int v) 365 { 366 *(uint16_t *)ptr = v; 367 } 368 369 static inline void stl_le_p(void *ptr, int v) 370 { 371 *(uint32_t *)ptr = v; 372 } 373 374 static inline void stq_le_p(void *ptr, uint64_t v) 375 { 376 *(uint64_t *)ptr = v; 377 } 378 379 /* float access */ 380 381 static inline float32 ldfl_le_p(const void *ptr) 382 { 383 return *(float32 *)ptr; 384 } 385 386 static inline float64 ldfq_le_p(const void *ptr) 387 { 388 return *(float64 *)ptr; 389 } 390 391 static inline void stfl_le_p(void *ptr, float32 v) 392 { 393 *(float32 *)ptr = v; 394 } 395 396 static inline void stfq_le_p(void *ptr, float64 v) 397 { 398 *(float64 *)ptr = v; 399 } 400 #endif 401 402 #if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED) 403 404 static inline int lduw_be_p(const void *ptr) 405 { 406 #if defined(__i386__) 407 int val; 408 asm volatile ("movzwl %1, %0\n" 409 "xchgb %b0, %h0\n" 410 : "=q" (val) 411 : "m" (*(uint16_t *)ptr)); 412 return val; 413 #else 414 const uint8_t *b = ptr; 415 return ((b[0] << 8) | b[1]); 416 #endif 417 } 418 419 static inline int ldsw_be_p(const void *ptr) 420 { 421 #if defined(__i386__) 422 int val; 423 asm volatile ("movzwl %1, %0\n" 424 "xchgb %b0, %h0\n" 425 : "=q" (val) 426 : "m" (*(uint16_t *)ptr)); 427 return (int16_t)val; 428 #else 429 const uint8_t *b = ptr; 430 return (int16_t)((b[0] << 8) | b[1]); 431 #endif 432 } 433 434 static inline int ldl_be_p(const void *ptr) 435 { 436 #if defined(__i386__) || defined(__x86_64__) 437 int val; 438 asm volatile ("movl %1, %0\n" 439 "bswap %0\n" 440 : "=r" (val) 441 : "m" (*(uint32_t *)ptr)); 442 return val; 443 #else 444 const uint8_t *b = ptr; 445 return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]; 446 #endif 447 } 448 449 static inline uint64_t ldq_be_p(const void *ptr) 450 { 451 uint32_t a,b; 452 a = ldl_be_p(ptr); 453 b = ldl_be_p((uint8_t *)ptr + 4); 454 return (((uint64_t)a<<32)|b); 455 } 456 457 static inline void stw_be_p(void *ptr, int v) 458 { 459 #if defined(__i386__) 460 asm volatile ("xchgb %b0, %h0\n" 461 "movw %w0, %1\n" 462 : "=q" (v) 463 : "m" (*(uint16_t *)ptr), "0" (v)); 464 #else 465 uint8_t *d = (uint8_t *) ptr; 466 d[0] = v >> 8; 467 d[1] = v; 468 #endif 469 } 470 471 static inline void stl_be_p(void *ptr, int v) 472 { 473 #if defined(__i386__) || defined(__x86_64__) 474 asm volatile ("bswap %0\n" 475 "movl %0, %1\n" 476 : "=r" (v) 477 : "m" (*(uint32_t *)ptr), "0" (v)); 478 #else 479 uint8_t *d = (uint8_t *) ptr; 480 d[0] = v >> 24; 481 d[1] = v >> 16; 482 d[2] = v >> 8; 483 d[3] = v; 484 #endif 485 } 486 487 static inline void stq_be_p(void *ptr, uint64_t v) 488 { 489 stl_be_p(ptr, v >> 32); 490 stl_be_p((uint8_t *)ptr + 4, v); 491 } 492 493 /* float access */ 494 495 static inline float32 ldfl_be_p(const void *ptr) 496 { 497 union { 498 float32 f; 499 uint32_t i; 500 } u; 501 u.i = ldl_be_p(ptr); 502 return u.f; 503 } 504 505 static inline void stfl_be_p(void *ptr, float32 v) 506 { 507 union { 508 float32 f; 509 uint32_t i; 510 } u; 511 u.f = v; 512 stl_be_p(ptr, u.i); 513 } 514 515 static inline float64 ldfq_be_p(const void *ptr) 516 { 517 CPU_DoubleU u; 518 u.l.upper = ldl_be_p(ptr); 519 u.l.lower = ldl_be_p((uint8_t *)ptr + 4); 520 return u.d; 521 } 522 523 static inline void stfq_be_p(void *ptr, float64 v) 524 { 525 CPU_DoubleU u; 526 u.d = v; 527 stl_be_p(ptr, u.l.upper); 528 stl_be_p((uint8_t *)ptr + 4, u.l.lower); 529 } 530 531 #else 532 533 static inline int lduw_be_p(const void *ptr) 534 { 535 return *(uint16_t *)ptr; 536 } 537 538 static inline int ldsw_be_p(const void *ptr) 539 { 540 return *(int16_t *)ptr; 541 } 542 543 static inline int ldl_be_p(const void *ptr) 544 { 545 return *(uint32_t *)ptr; 546 } 547 548 static inline uint64_t ldq_be_p(const void *ptr) 549 { 550 return *(uint64_t *)ptr; 551 } 552 553 static inline void stw_be_p(void *ptr, int v) 554 { 555 *(uint16_t *)ptr = v; 556 } 557 558 static inline void stl_be_p(void *ptr, int v) 559 { 560 *(uint32_t *)ptr = v; 561 } 562 563 static inline void stq_be_p(void *ptr, uint64_t v) 564 { 565 *(uint64_t *)ptr = v; 566 } 567 568 /* float access */ 569 570 static inline float32 ldfl_be_p(const void *ptr) 571 { 572 return *(float32 *)ptr; 573 } 574 575 static inline float64 ldfq_be_p(const void *ptr) 576 { 577 return *(float64 *)ptr; 578 } 579 580 static inline void stfl_be_p(void *ptr, float32 v) 581 { 582 *(float32 *)ptr = v; 583 } 584 585 static inline void stfq_be_p(void *ptr, float64 v) 586 { 587 *(float64 *)ptr = v; 588 } 589 590 #endif 591 592 /* target CPU memory access functions */ 593 #if defined(TARGET_WORDS_BIGENDIAN) 594 #define lduw_p(p) lduw_be_p(p) 595 #define ldsw_p(p) ldsw_be_p(p) 596 #define ldl_p(p) ldl_be_p(p) 597 #define ldq_p(p) ldq_be_p(p) 598 #define ldfl_p(p) ldfl_be_p(p) 599 #define ldfq_p(p) ldfq_be_p(p) 600 #define stw_p(p, v) stw_be_p(p, v) 601 #define stl_p(p, v) stl_be_p(p, v) 602 #define stq_p(p, v) stq_be_p(p, v) 603 #define stfl_p(p, v) stfl_be_p(p, v) 604 #define stfq_p(p, v) stfq_be_p(p, v) 605 #else 606 #define lduw_p(p) lduw_le_p(p) 607 #define ldsw_p(p) ldsw_le_p(p) 608 #define ldl_p(p) ldl_le_p(p) 609 #define ldq_p(p) ldq_le_p(p) 610 #define ldfl_p(p) ldfl_le_p(p) 611 #define ldfq_p(p) ldfq_le_p(p) 612 #define stw_p(p, v) stw_le_p(p, v) 613 #define stl_p(p, v) stl_le_p(p, v) 614 #define stq_p(p, v) stq_le_p(p, v) 615 #define stfl_p(p, v) stfl_le_p(p, v) 616 #define stfq_p(p, v) stfq_le_p(p, v) 617 #endif 618 619 /* MMU memory access macros */ 620 621 #if defined(CONFIG_USER_ONLY) 622 #include <assert.h> 623 #include "qemu-types.h" 624 625 /* On some host systems the guest address space is reserved on the host. 626 * This allows the guest address space to be offset to a convenient location. 627 */ 628 //#define GUEST_BASE 0x20000000 629 #define GUEST_BASE 0 630 631 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */ 632 #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE)) 633 #define h2g(x) ({ \ 634 unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ 635 /* Check if given address fits target address space */ \ 636 assert(__ret == (abi_ulong)__ret); \ 637 (abi_ulong)__ret; \ 638 }) 639 #define h2g_valid(x) ({ \ 640 unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ 641 (__guest == (abi_ulong)__guest); \ 642 }) 643 644 #define saddr(x) g2h(x) 645 #define laddr(x) g2h(x) 646 647 #else /* !CONFIG_USER_ONLY */ 648 /* NOTE: we use double casts if pointers and target_ulong have 649 different sizes */ 650 #define saddr(x) (uint8_t *)(long)(x) 651 #define laddr(x) (uint8_t *)(long)(x) 652 #endif 653 654 #define ldub_raw(p) ldub_p(laddr((p))) 655 #define ldsb_raw(p) ldsb_p(laddr((p))) 656 #define lduw_raw(p) lduw_p(laddr((p))) 657 #define ldsw_raw(p) ldsw_p(laddr((p))) 658 #define ldl_raw(p) ldl_p(laddr((p))) 659 #define ldq_raw(p) ldq_p(laddr((p))) 660 #define ldfl_raw(p) ldfl_p(laddr((p))) 661 #define ldfq_raw(p) ldfq_p(laddr((p))) 662 #define stb_raw(p, v) stb_p(saddr((p)), v) 663 #define stw_raw(p, v) stw_p(saddr((p)), v) 664 #define stl_raw(p, v) stl_p(saddr((p)), v) 665 #define stq_raw(p, v) stq_p(saddr((p)), v) 666 #define stfl_raw(p, v) stfl_p(saddr((p)), v) 667 #define stfq_raw(p, v) stfq_p(saddr((p)), v) 668 669 670 #if defined(CONFIG_USER_ONLY) 671 672 /* if user mode, no other memory access functions */ 673 #define ldub(p) ldub_raw(p) 674 #define ldsb(p) ldsb_raw(p) 675 #define lduw(p) lduw_raw(p) 676 #define ldsw(p) ldsw_raw(p) 677 #define ldl(p) ldl_raw(p) 678 #define ldq(p) ldq_raw(p) 679 #define ldfl(p) ldfl_raw(p) 680 #define ldfq(p) ldfq_raw(p) 681 #define stb(p, v) stb_raw(p, v) 682 #define stw(p, v) stw_raw(p, v) 683 #define stl(p, v) stl_raw(p, v) 684 #define stq(p, v) stq_raw(p, v) 685 #define stfl(p, v) stfl_raw(p, v) 686 #define stfq(p, v) stfq_raw(p, v) 687 688 #define ldub_code(p) ldub_raw(p) 689 #define ldsb_code(p) ldsb_raw(p) 690 #define lduw_code(p) lduw_raw(p) 691 #define ldsw_code(p) ldsw_raw(p) 692 #define ldl_code(p) ldl_raw(p) 693 #define ldq_code(p) ldq_raw(p) 694 695 #define ldub_kernel(p) ldub_raw(p) 696 #define ldsb_kernel(p) ldsb_raw(p) 697 #define lduw_kernel(p) lduw_raw(p) 698 #define ldsw_kernel(p) ldsw_raw(p) 699 #define ldl_kernel(p) ldl_raw(p) 700 #define ldq_kernel(p) ldq_raw(p) 701 #define ldfl_kernel(p) ldfl_raw(p) 702 #define ldfq_kernel(p) ldfq_raw(p) 703 #define stb_kernel(p, v) stb_raw(p, v) 704 #define stw_kernel(p, v) stw_raw(p, v) 705 #define stl_kernel(p, v) stl_raw(p, v) 706 #define stq_kernel(p, v) stq_raw(p, v) 707 #define stfl_kernel(p, v) stfl_raw(p, v) 708 #define stfq_kernel(p, vt) stfq_raw(p, v) 709 710 #endif /* defined(CONFIG_USER_ONLY) */ 711 712 /* page related stuff */ 713 714 #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) 715 #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) 716 #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) 717 718 /* ??? These should be the larger of unsigned long and target_ulong. */ 719 extern unsigned long qemu_real_host_page_size; 720 extern unsigned long qemu_host_page_bits; 721 extern unsigned long qemu_host_page_size; 722 extern unsigned long qemu_host_page_mask; 723 724 #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask) 725 726 /* same as PROT_xxx */ 727 #define PAGE_READ 0x0001 728 #define PAGE_WRITE 0x0002 729 #define PAGE_EXEC 0x0004 730 #define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) 731 #define PAGE_VALID 0x0008 732 /* original state of the write flag (used when tracking self-modifying 733 code */ 734 #define PAGE_WRITE_ORG 0x0010 735 #define PAGE_RESERVED 0x0020 736 737 void page_dump(FILE *f); 738 int walk_memory_regions(void *, 739 int (*fn)(void *, unsigned long, unsigned long, unsigned long)); 740 int page_get_flags(target_ulong address); 741 void page_set_flags(target_ulong start, target_ulong end, int flags); 742 int page_check_range(target_ulong start, target_ulong len, int flags); 743 744 void cpu_exec_init_all(unsigned long tb_size); 745 CPUState *cpu_copy(CPUState *env); 746 CPUState *qemu_get_cpu(int cpu); 747 748 void cpu_dump_state(CPUState *env, FILE *f, 749 int (*cpu_fprintf)(FILE *f, const char *fmt, ...), 750 int flags); 751 void cpu_dump_statistics (CPUState *env, FILE *f, 752 int (*cpu_fprintf)(FILE *f, const char *fmt, ...), 753 int flags); 754 755 void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...) 756 __attribute__ ((__format__ (__printf__, 2, 3))); 757 extern CPUState *first_cpu; 758 extern CPUState *cpu_single_env; 759 extern int64_t qemu_icount; 760 extern int use_icount; 761 762 #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */ 763 #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */ 764 #define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */ 765 #define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */ 766 #define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */ 767 #define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */ 768 #define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */ 769 #define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */ 770 #define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */ 771 772 void cpu_interrupt(CPUState *s, int mask); 773 void cpu_reset_interrupt(CPUState *env, int mask); 774 775 void cpu_exit(CPUState *s); 776 777 int qemu_cpu_has_work(CPUState *env); 778 779 /* Breakpoint/watchpoint flags */ 780 #define BP_MEM_READ 0x01 781 #define BP_MEM_WRITE 0x02 782 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) 783 #define BP_STOP_BEFORE_ACCESS 0x04 784 #define BP_WATCHPOINT_HIT 0x08 785 #define BP_GDB 0x10 786 #define BP_CPU 0x20 787 788 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, 789 CPUBreakpoint **breakpoint); 790 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags); 791 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint); 792 void cpu_breakpoint_remove_all(CPUState *env, int mask); 793 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 794 int flags, CPUWatchpoint **watchpoint); 795 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, 796 target_ulong len, int flags); 797 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint); 798 void cpu_watchpoint_remove_all(CPUState *env, int mask); 799 800 #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ 801 #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ 802 #define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */ 803 804 void cpu_single_step(CPUState *env, int enabled); 805 void cpu_reset(CPUState *s); 806 807 /* Return the physical page corresponding to a virtual one. Use it 808 only for debugging because no protection checks are done. Return -1 809 if no page found. */ 810 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr); 811 812 #define CPU_LOG_TB_OUT_ASM (1 << 0) 813 #define CPU_LOG_TB_IN_ASM (1 << 1) 814 #define CPU_LOG_TB_OP (1 << 2) 815 #define CPU_LOG_TB_OP_OPT (1 << 3) 816 #define CPU_LOG_INT (1 << 4) 817 #define CPU_LOG_EXEC (1 << 5) 818 #define CPU_LOG_PCALL (1 << 6) 819 #define CPU_LOG_IOPORT (1 << 7) 820 #define CPU_LOG_TB_CPU (1 << 8) 821 #define CPU_LOG_RESET (1 << 9) 822 823 /* define log items */ 824 typedef struct CPULogItem { 825 int mask; 826 const char *name; 827 const char *help; 828 } CPULogItem; 829 830 extern const CPULogItem cpu_log_items[]; 831 832 void cpu_set_log(int log_flags); 833 void cpu_set_log_filename(const char *filename); 834 int cpu_str_to_log_mask(const char *str); 835 836 /* IO ports API */ 837 838 /* NOTE: as these functions may be even used when there is an isa 839 brige on non x86 targets, we always defined them */ 840 #ifndef NO_CPU_IO_DEFS 841 void cpu_outb(CPUState *env, int addr, int val); 842 void cpu_outw(CPUState *env, int addr, int val); 843 void cpu_outl(CPUState *env, int addr, int val); 844 int cpu_inb(CPUState *env, int addr); 845 int cpu_inw(CPUState *env, int addr); 846 int cpu_inl(CPUState *env, int addr); 847 #endif 848 849 /* memory API */ 850 851 extern int phys_ram_fd; 852 extern uint8_t *phys_ram_dirty; 853 extern ram_addr_t ram_size; 854 extern ram_addr_t last_ram_offset; 855 856 /* physical memory access */ 857 858 /* MMIO pages are identified by a combination of an IO device index and 859 3 flags. The ROMD code stores the page ram offset in iotlb entry, 860 so only a limited number of ids are avaiable. */ 861 862 #define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT)) 863 864 /* Flags stored in the low bits of the TLB virtual address. These are 865 defined so that fast path ram access is all zeros. */ 866 /* Zero if TLB entry is valid. */ 867 #define TLB_INVALID_MASK (1 << 3) 868 /* Set if TLB entry references a clean RAM page. The iotlb entry will 869 contain the page physical address. */ 870 #define TLB_NOTDIRTY (1 << 4) 871 /* Set if TLB entry is an IO callback. */ 872 #define TLB_MMIO (1 << 5) 873 874 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 875 uint8_t *buf, int len, int is_write); 876 877 #define VGA_DIRTY_FLAG 0x01 878 #define CODE_DIRTY_FLAG 0x02 879 #define KQEMU_DIRTY_FLAG 0x04 880 #define MIGRATION_DIRTY_FLAG 0x08 881 882 /* read dirty bit (return 0 or 1) */ 883 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) 884 { 885 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff; 886 } 887 888 static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, 889 int dirty_flags) 890 { 891 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags; 892 } 893 894 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) 895 { 896 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff; 897 } 898 899 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 900 int dirty_flags); 901 void cpu_tlb_update_dirty(CPUState *env); 902 903 int cpu_physical_memory_set_dirty_tracking(int enable); 904 905 int cpu_physical_memory_get_dirty_tracking(void); 906 907 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, 908 target_phys_addr_t end_addr); 909 910 void dump_exec_info(FILE *f, 911 int (*cpu_fprintf)(FILE *f, const char *fmt, ...)); 912 913 /* Coalesced MMIO regions are areas where write operations can be reordered. 914 * This usually implies that write operations are side-effect free. This allows 915 * batching which can make a major impact on performance when using 916 * virtualization. 917 */ 918 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size); 919 920 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size); 921 922 /*******************************************/ 923 /* host CPU ticks (if available) */ 924 925 #if defined(_ARCH_PPC) 926 927 static inline int64_t cpu_get_real_ticks(void) 928 { 929 int64_t retval; 930 #ifdef _ARCH_PPC64 931 /* This reads timebase in one 64bit go and includes Cell workaround from: 932 http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html 933 */ 934 __asm__ __volatile__ ( 935 "mftb %0\n\t" 936 "cmpwi %0,0\n\t" 937 "beq- $-8" 938 : "=r" (retval)); 939 #else 940 /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ 941 unsigned long junk; 942 __asm__ __volatile__ ( 943 "mftbu %1\n\t" 944 "mftb %L0\n\t" 945 "mftbu %0\n\t" 946 "cmpw %0,%1\n\t" 947 "bne $-16" 948 : "=r" (retval), "=r" (junk)); 949 #endif 950 return retval; 951 } 952 953 #elif defined(__i386__) 954 955 static inline int64_t cpu_get_real_ticks(void) 956 { 957 int64_t val; 958 asm volatile ("rdtsc" : "=A" (val)); 959 return val; 960 } 961 962 #elif defined(__x86_64__) 963 964 static inline int64_t cpu_get_real_ticks(void) 965 { 966 uint32_t low,high; 967 int64_t val; 968 asm volatile("rdtsc" : "=a" (low), "=d" (high)); 969 val = high; 970 val <<= 32; 971 val |= low; 972 return val; 973 } 974 975 #elif defined(__hppa__) 976 977 static inline int64_t cpu_get_real_ticks(void) 978 { 979 int val; 980 asm volatile ("mfctl %%cr16, %0" : "=r"(val)); 981 return val; 982 } 983 984 #elif defined(__ia64) 985 986 static inline int64_t cpu_get_real_ticks(void) 987 { 988 int64_t val; 989 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); 990 return val; 991 } 992 993 #elif defined(__s390__) 994 995 static inline int64_t cpu_get_real_ticks(void) 996 { 997 int64_t val; 998 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); 999 return val; 1000 } 1001 1002 #elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__) 1003 1004 static inline int64_t cpu_get_real_ticks (void) 1005 { 1006 #if defined(_LP64) 1007 uint64_t rval; 1008 asm volatile("rd %%tick,%0" : "=r"(rval)); 1009 return rval; 1010 #else 1011 union { 1012 uint64_t i64; 1013 struct { 1014 uint32_t high; 1015 uint32_t low; 1016 } i32; 1017 } rval; 1018 asm volatile("rd %%tick,%1; srlx %1,32,%0" 1019 : "=r"(rval.i32.high), "=r"(rval.i32.low)); 1020 return rval.i64; 1021 #endif 1022 } 1023 1024 #elif defined(__mips__) 1025 1026 static inline int64_t cpu_get_real_ticks(void) 1027 { 1028 #if __mips_isa_rev >= 2 1029 uint32_t count; 1030 static uint32_t cyc_per_count = 0; 1031 1032 if (!cyc_per_count) 1033 __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count)); 1034 1035 __asm__ __volatile__("rdhwr %1, $2" : "=r" (count)); 1036 return (int64_t)(count * cyc_per_count); 1037 #else 1038 /* FIXME */ 1039 static int64_t ticks = 0; 1040 return ticks++; 1041 #endif 1042 } 1043 1044 #else 1045 /* The host CPU doesn't have an easily accessible cycle counter. 1046 Just return a monotonically increasing value. This will be 1047 totally wrong, but hopefully better than nothing. */ 1048 static inline int64_t cpu_get_real_ticks (void) 1049 { 1050 static int64_t ticks = 0; 1051 return ticks++; 1052 } 1053 #endif 1054 1055 /* profiling */ 1056 #ifdef CONFIG_PROFILER 1057 static inline int64_t profile_getclock(void) 1058 { 1059 return cpu_get_real_ticks(); 1060 } 1061 1062 extern int64_t kqemu_time, kqemu_time_start; 1063 extern int64_t qemu_time, qemu_time_start; 1064 extern int64_t tlb_flush_time; 1065 extern int64_t kqemu_exec_count; 1066 extern int64_t dev_time; 1067 extern int64_t kqemu_ret_int_count; 1068 extern int64_t kqemu_ret_excp_count; 1069 extern int64_t kqemu_ret_intr_count; 1070 #endif 1071 1072 #endif /* CPU_ALL_H */ 1073