/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#if 0 /* ANDROID */
#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif
#endif /* ANDROID */

#include "config.h"
#include <setjmp.h>
#include <inttypes.h>
#include <signal.h>
#include "osdep.h"
#include "qemu-queue.h"
#include "targphys.h"

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif

#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
    */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get host address.  IO accesses
       use the corresponding iotlb value.  */
#if TARGET_PHYS_ADDR_BITS == 64
    /* on i386 Linux make sure it is aligned */
    target_phys_addr_t addend __attribute__((aligned(8)));
#else
    target_phys_addr_t addend;
#endif
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                   sizeof(target_phys_addr_t))];
} CPUTLBEntry;
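
/* A minimal, illustrative sketch of how the softmmu fast path is expected to
 * consume a CPUTLBEntry.  The real code lives in softmmu_header.h and
 * softmmu_template.h; `env', `mmu_idx', TARGET_PAGE_BITS, TARGET_PAGE_MASK
 * and TLB_INVALID_MASK are defined in other headers and are assumed here.
 * The point is that addr_read/addr_write/addr_code act as page-aligned tags,
 * while addend turns a guest virtual address into a host pointer for plain
 * RAM accesses:
 *
 *   int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 *   CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];
 *   if ((addr & TARGET_PAGE_MASK) ==
 *       (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 *       hit: access guest RAM through the host pointer (addr + te->addend)
 *   } else {
 *       miss or I/O: take the slow path (tlb_fill(), then iotlb for MMIO)
 *   }
 */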

#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

struct kvm_run;
struct KVMState;

typedef struct CPUBreakpoint {
    target_ulong pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    target_ulong vaddr;
    target_ulong len_mask;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
    struct TranslationBlock *current_tb; /* currently executing TB */ \
    /* soft mmu support */ \
    /* in order to avoid passing too many arguments to the MMIO \
       helpers, we store some rarely used information in the CPU \
       context. */ \
    unsigned long mem_io_pc; /* host pc at which the memory was \
                                accessed */ \
    target_ulong mem_io_vaddr; /* target virtual addr at which the \
                                  memory was accessed */ \
    uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
    uint32_t stop; /* Stop request */ \
    uint32_t stopped; /* Artificially stopped */ \
    uint32_t interrupt_request; \
    volatile sig_atomic_t exit_request; \
    /* The meaning of the MMU modes is defined in the target code. */ \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
    /* buffer for temporaries in the code generator */ \
    long temp_buf[CPU_TEMP_BUF_NLONGS]; \
    \
    int64_t icount_extra; /* Instructions until next timer event. */ \
    /* Number of cycles left, with interrupt flag in high bit. \
       This allows a single read-compare-cbranch-write sequence to test \
       for both decrementer underflow and exceptions. */ \
    union { \
        uint32_t u32; \
        icount_decr_u16 u16; \
    } icount_decr; \
    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
    \
    /* from this point: preserved by CPU reset */ \
    /* ice debug support */ \
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
    int singlestep_enabled; \
    \
    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
    CPUWatchpoint *watchpoint_hit; \
    \
    struct GDBRegisterState *gdb_regs; \
    \
    /* Core interrupt code */ \
    jmp_buf jmp_env; \
    int exception_index; \
    \
    CPUState *next_cpu; /* next CPU sharing TB cache */ \
    int cpu_index; /* CPU index (informative) */ \
    uint32_t host_tid; /* host thread ID */ \
    int numa_node; /* NUMA node this CPU belongs to */ \
    int nr_cores; /* number of cores within this CPU package */ \
    int nr_threads; /* number of threads within this CPU */ \
    int running; /* Nonzero if cpu is currently running (usermode). */ \
    /* user data */ \
    void *opaque; \
    \
    uint32_t created; \
    struct QemuThread *thread; \
    struct QemuCond *halt_cond; \
    const char *cpu_model_str; \
    struct KVMState *kvm_state; \
    struct kvm_run *kvm_run; \
    int kvm_fd; \
    int kvm_vcpu_dirty;
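
/* A minimal, illustrative sketch of how CPU_COMMON is meant to be used: it is
 * expanded inside each target's CPUState structure (see the per-target cpu.h
 * headers), typically after the target-specific registers.  CPUXYZState and
 * its fields below are placeholders, not a real target:
 *
 *   typedef struct CPUXYZState {
 *       target_ulong regs[16];    <- target-specific state comes first
 *
 *       CPU_COMMON                <- the shared fields defined above
 *
 *       (fields placed here are typically preserved across cpu_reset)
 *   } CPUXYZState;
 */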

#endif