/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#if 0 /* ANDROID */
#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif
#endif /* ANDROID */

#include "config.h"
#include <setjmp.h>
#include <inttypes.h>
#include <signal.h>
#include "osdep.h"
#include "sys-queue.h"
#include "targphys.h"

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif
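
/*
 * The TARGET_FMT_* macros let shared code print target_long/target_ulong
 * values without caring whether the guest is 32-bit or 64-bit.  Illustrative
 * use only (the stream and the variable name are placeholders):
 *
 *     fprintf(stderr, "unhandled access at " TARGET_FMT_lx "\n", vaddr);
 */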

#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)

#define EXCP_INTERRUPT  0x10000 /* asynchronous interrupt */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
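
/*
 * These codes are returned by cpu_exec() alongside the target-defined
 * exception numbers.  A rough sketch of how a caller might dispatch on them
 * (simplified; handle_debug_exception() is only a placeholder name):
 *
 *     int ret = cpu_exec(env);
 *     if (ret == EXCP_DEBUG) {
 *         handle_debug_exception(env);   // e.g. notify the gdb stub
 *     } else if (ret == EXCP_HALTED || ret == EXCP_HLT) {
 *         // nothing to run until the next interrupt or timer event
 *     }
 */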

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
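
/*
 * tb_jmp_cache is indexed by a hash of the guest PC built from these masks:
 * the low TB_JMP_PAGE_BITS of the index depend only on the in-page offset and
 * the high bits only on the page number, so all entries for one page land in
 * a single TB_JMP_PAGE_SIZE-sized group that can be cleared together.  The
 * corresponding helper in exec-all.h looks roughly like this:
 *
 *     static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 *     {
 *         target_ulong tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
 *         return ((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
 *                | (tmp & TB_JMP_ADDR_MASK);
 *     }
 */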

#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
    */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get physical address.  IO accesses
       use the corresponding iotlb value.  */
#if TARGET_PHYS_ADDR_BITS == 64
    /* on i386 Linux make sure it is aligned */
    target_phys_addr_t addend __attribute__((aligned(8)));
#else
    target_phys_addr_t addend;
#endif
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                   sizeof(target_phys_addr_t))];
} CPUTLBEntry;
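
/*
 * The layout above is what makes the softmmu fast path a single compare: the
 * low bits of addr_read/addr_write/addr_code are only nonzero when the access
 * cannot go straight to memory (MMIO, invalid entry, ...).  A rough sketch of
 * the lookup performed by the generated code and softmmu_header.h (simplified;
 * DATA_SIZE stands for the access size and the helper names are indicative):
 *
 *     int idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 *     CPUTLBEntry *e = &env->tlb_table[mmu_idx][idx];
 *     if (e->addr_read == (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1)))) {
 *         // fast path: addend added to the guest address reaches the backing memory
 *         val = ldl_raw((uint8_t *)(long)(addr + e->addend));
 *     } else {
 *         // TLB miss, MMIO or page-crossing access: take the slow-path helper
 *         val = __ldl_mmu(addr, mmu_idx);
 *     }
 */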

#ifdef WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
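
/*
 * The two layouts keep icount_decr.u16.low overlapping the low half of
 * icount_decr.u32 on both big- and little-endian hosts, so the union declared
 * in CPU_COMMON below can be decremented as a 32-bit value while its high half
 * doubles as an exit-request flag.  Simplified sketch of the idea (the real
 * logic lives in the generated TB prologues and cpu-exec.c; tb_icount stands
 * for the number of instructions in the TB):
 *
 *     // asynchronous exit request: force the 32-bit view negative
 *     env->icount_decr.u16.high = 0xffff;
 *
 *     // per-TB check: one subtract-and-branch catches both an exhausted
 *     // instruction budget and a pending exit request
 *     if ((int32_t)(env->icount_decr.u32 -= tb_icount) < 0)
 *         cpu_loop_exit();
 */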

struct kvm_run;
struct KVMState;

typedef struct CPUBreakpoint {
    target_ulong pc;
    int flags; /* BP_* */
    TAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    target_ulong vaddr;
    target_ulong len_mask;
    int flags; /* BP_* */
    TAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
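
/*
 * Breakpoints and watchpoints are chained into per-CPU TAILQ lists declared
 * in CPU_COMMON below; the insert/remove helpers live in exec.c.  A minimal
 * sketch of linking one in (simplified):
 *
 *     CPUBreakpoint *bp = qemu_mallocz(sizeof(*bp));
 *     bp->pc = pc;
 *     bp->flags = flags;
 *     TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
 */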

#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON                                                      \
    struct TranslationBlock *current_tb; /* currently executing TB  */  \
    /* soft mmu support */                                              \
    /* in order to avoid passing too many arguments to the MMIO         \
       helpers, we store some rarely used information in the CPU        \
       context */                                                       \
    unsigned long mem_io_pc; /* host pc at which the memory was         \
                                accessed */                             \
    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
                                     memory was accessed */             \
    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
    uint32_t stop;   /* Stop request */                                 \
    uint32_t stopped; /* Artificially stopped */                        \
    uint32_t interrupt_request;                                         \
    volatile sig_atomic_t exit_request;                                 \
    /* The meaning of the MMU modes is defined in the target code. */   \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
    /* buffer for temporaries in the code generator */                  \
    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
                                                                        \
    int64_t icount_extra; /* Instructions until next timer event.  */   \
    /* Number of cycles left, with interrupt flag in high bit.          \
       This allows a single read-compare-cbranch-write sequence to test \
       for both decrementer underflow and exceptions.  */               \
    union {                                                             \
        uint32_t u32;                                                   \
        icount_decr_u16 u16;                                            \
    } icount_decr;                                                      \
    uint32_t can_do_io; /* nonzero if memory mapped IO is safe.  */     \
                                                                        \
    /* from this point: preserved by CPU reset */                       \
    /* ice debug support */                                             \
    TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
    int singlestep_enabled;                                             \
                                                                        \
    TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
    CPUWatchpoint *watchpoint_hit;                                      \
                                                                        \
    struct GDBRegisterState *gdb_regs;                                  \
                                                                        \
    /* Core interrupt code */                                           \
    jmp_buf jmp_env;                                                    \
    int exception_index;                                                \
                                                                        \
    CPUState *next_cpu; /* next CPU sharing TB cache */                 \
    int cpu_index; /* CPU index (informative) */                        \
    uint32_t host_tid; /* host thread ID */                             \
    int numa_node; /* NUMA node this CPU belongs to */                  \
    int running; /* Nonzero if the CPU is currently running (user mode). */ \
    /* user data */                                                     \
    void *opaque;                                                       \
                                                                        \
    uint32_t created;                                                   \
    struct QemuThread *thread;                                          \
    struct QemuCond *halt_cond;                                         \
    const char *cpu_model_str;                                          \
    struct KVMState *kvm_state;                                         \
    struct kvm_run *kvm_run;                                            \
    int kvm_fd;

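/*
 * CPU_COMMON is meant to be expanded inside each target's CPUState structure
 * (target-<arch>/cpu.h), after the fields that are cleared on reset and before
 * the target-specific fields that must survive it.  A minimal sketch, with a
 * made-up target for illustration:
 *
 *     typedef struct CPUFooState {
 *         target_ulong regs[16];     // target registers, cleared on reset
 *         target_ulong pc;
 *
 *         CPU_COMMON                 // fields shared by every target
 *
 *         int foo_feature_flags;     // preserved across reset
 *     } CPUFooState;
 */
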
#endif