/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allows seeing translation results; the slowdown should be negligible,
   so we leave it enabled */
#define DEBUG_DISAS

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
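
/* Illustrative sketch (hypothetical, simplified target code): a target's
 * gen_intermediate_code() typically drives its decode loop with the
 * DISAS_* values above.  disas_insn() and the DisasContext fields stand
 * in for the target-specific decoder:
 *
 *     dc->is_jmp = DISAS_NEXT;
 *     do {
 *         pc_ptr = disas_insn(env, dc, pc_ptr);  // emit TCG ops for one insn
 *         num_insns++;
 *     } while (dc->is_jmp == DISAS_NEXT && num_insns < max_insns);
 *     if (dc->is_jmp != DISAS_TB_JUMP) {
 *         // pc was set dynamically (or cpu state changed): plain exit,
 *         // so this TB cannot be chained to a statically known successor
 *         tcg_gen_exit_tb(0);
 *     }
 */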

typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 96
/* A Call op needs up to 6 + 2N parameters (N = number of arguments).  */
#define MAX_OPC_PARAM 10
#define OPC_BUF_SIZE 2048
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
extern target_ulong gen_opc_jump_pc[2];
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "qemu-log.h"

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void gen_pc_load(CPUState *env, struct TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc);

unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_io_recompile(CPUState *env, void *retaddr);
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *env);
void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu);
static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
                               target_phys_addr_t paddr, int prot,
                               int mmu_idx, int is_softmmu)
{
    if (prot & PAGE_READ)
        prot |= PAGE_EXEC;
    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
}

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

#define MIN_CODE_GEN_BUFFER_SIZE (1024 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-target average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(_ARCH_PPC) || defined(__x86_64__) || defined(__arm__) || defined(__i386__)
#define USE_DIRECT_JUMP
#endif
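
/* Illustrative sketch (hypothetical target code): a target's MMU fault
 * handler resolves the guest mapping and installs it with tlb_set_page()
 * above; the page-table walk is target specific and elided here:
 *
 *     int cpu_xyz_handle_mmu_fault(CPUState *env, target_ulong address,
 *                                  int rw, int mmu_idx, int is_softmmu)
 *     {
 *         target_phys_addr_t paddr;
 *         int prot;
 *         ...                        // walk page tables: fill paddr and prot
 *         address &= TARGET_PAGE_MASK;
 *         paddr &= TARGET_PAGE_MASK;
 *         return tlb_set_page(env, address, paddr, prot, mmu_idx, is_softmmu);
 *     }
 */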

struct TranslationBlock {
    target_ulong pc;      /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags;       /* flags defining in which context the code was generated */
    uint16_t size;        /* size of target code for this block (1 <=
                             size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;      /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;      /* pointer to the translated code */
    /* next matching tb for physical address.  */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    unsigned long tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
#ifdef CONFIG_TRACE
    struct BBRec *bb_rec;
    uint64_t prev_time;
#endif

#ifdef CONFIG_MEMCHECK
    /* Maps PCs in this translation block to corresponding PCs in guest
     * address space. The array is arranged in such a way that every even
     * entry contains a PC in the translation block, followed by an odd
     * entry that contains the guest PC corresponding to it. The array is
     * initialized by tcg_gen_code_common when it translates guest code. */
    target_ulong* tpc2gpc;
    /* Number of (pc_tb, pc_guest) pairs in the tpc2gpc array. */
    unsigned int tpc2gpc_pairs;
#endif  // CONFIG_MEMCHECK

    uint32_t icount;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
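
/* Illustrative sketch: how the execution loop uses the hash above to look
 * up the next TB in the per-CPU tb_jmp_cache (modeled on tb_find_fast()
 * in cpu-exec.c, simplified):
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
 *     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
 *                  tb->flags != flags)) {
 *         tb = tb_find_slow(pc, cs_base, flags);  // falls back to tb_phys_hash
 *     }
 */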

#ifdef CONFIG_MEMCHECK
/* Gets translated PC for a given (translated PC, guest PC) pair.
 * Return:
 *  Translated PC, or 0 if the pair index was too large.
 */
static inline target_ulong
tb_get_tb_pc(const TranslationBlock* tb, unsigned int pair)
{
    return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
            tb->tpc2gpc[pair * 2] : 0;
}

/* Gets guest PC for a given (translated PC, guest PC) pair.
 * Return:
 *  Guest PC, or 0 if the pair index was too large.
 */
static inline target_ulong
tb_get_guest_pc(const TranslationBlock* tb, unsigned int pair)
{
    return (tb->tpc2gpc != NULL && pair < tb->tpc2gpc_pairs) ?
            tb->tpc2gpc[pair * 2 + 1] : 0;
}

/* Gets guest PC for a given translated PC.
 * Return:
 *  Guest PC for the given translated PC, or 0 if tb's tpc2gpc array
 *  contains no pair matching the translated PC.
 */
static inline target_ulong
tb_search_guest_pc_from_tb_pc(const TranslationBlock* tb, target_ulong tb_pc)
{
    if (tb->tpc2gpc != NULL && tb->tpc2gpc_pairs != 0) {
        unsigned int m_min = 0;
        unsigned int m_max = (tb->tpc2gpc_pairs - 1) << 1;
        /* Make sure that tb_pc is within the TB's translated PC range. */
        if (tb_pc < tb->tpc2gpc[0]) {
            return 0;
        }
        /* Binary search over the even (translated PC) entries; the index m
           is kept even so it always points at the start of a pair. */
        while (m_min <= m_max) {
            const unsigned int m = ((m_min + m_max) >> 1) & ~1;
            if (tb_pc < tb->tpc2gpc[m]) {
                m_max = m - 2;
            } else if (m == m_max || tb_pc < tb->tpc2gpc[m + 2]) {
                return tb->tpc2gpc[m + 1];
            } else {
                m_min = m + 2;
            }
        }
        return tb->tpc2gpc[m_max + 1];
    }
    return 0;
}
#endif  // CONFIG_MEMCHECK

TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *env);
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);

extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
extern int code_gen_max_blocks;

#if defined(USE_DIRECT_JUMP)

#if defined(_ARCH_PPC)
extern void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__arm__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
#if QEMU_GNUC_PREREQ(4, 1)
    void __clear_cache(char *beg, char *end);
#else
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
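
/* Illustrative sketch: cpu_exec() chains TBs with tb_add_jump().  The
 * previously executed TB returns its own address with the taken jump
 * slot index encoded in the two low bits, which the loop unpacks
 * (simplified from cpu-exec.c; TBs spanning two pages are not chained):
 *
 *     if (next_tb != 0 && tb->page_addr[1] == (target_ulong)-1) {
 *         spin_lock(&tb_lock);
 *         tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
 *         spin_unlock(&tb_lock);
 *     }
 */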

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#include "qemu-lock.h"

extern spinlock_t tb_lock;

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
              void *retaddr);

#include "softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        do_unassigned_access(addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)(unsigned long)addr
        + env1->tlb_table[mmu_idx][page_index].addend;
    return qemu_ram_addr_from_host(p);
}
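
/* Illustrative sketch: tb_gen_code() in exec.c keys new TBs by the result
 * of get_phys_addr_code() so that every virtual mapping of the same
 * physical page shares one translation (simplified; error handling and
 * the second-page case are elided):
 *
 *     phys_pc = get_phys_addr_code(env, pc);
 *     phys_page1 = phys_pc & TARGET_PAGE_MASK;
 *     tb = tb_alloc(pc);
 *     ...
 *     tb_link_phys(tb, phys_pc, phys_page2);
 */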

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If we are not executing code, assume we are OK.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_KQEMU
#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))

#define MSR_QPI_COMMBASE 0xfabe0010

int kqemu_init(CPUState *env);
int kqemu_cpu_exec(CPUState *env);
void kqemu_flush_page(CPUState *env, target_ulong addr);
void kqemu_flush(CPUState *env, int global);
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset);
void kqemu_cpu_interrupt(CPUState *env);
void kqemu_record_dump(void);

extern uint32_t kqemu_comm_base;

extern ram_addr_t kqemu_phys_ram_size;
extern uint8_t *kqemu_phys_ram_base;

static inline int kqemu_is_ok(CPUState *env)
{
    return (env->kqemu_enabled &&
            (env->cr[0] & CR0_PE_MASK) &&
            !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
            (env->eflags & IF_MASK) &&
            !(env->eflags & VM_MASK) &&
            (env->kqemu_enabled == 2 ||
             ((env->hflags & HF_CPL_MASK) == 3 &&
              (env->eflags & IOPL_MASK) != IOPL_MASK)));
}

#endif

typedef void (CPUDebugExcpHandler)(CPUState *env);

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

#endif