/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"

enum {
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

/* no MMU emulation */
int no_mmu_map_address (CPUState *env, target_phys_addr_t *physical, int *prot,
                        target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
int fixed_mmu_map_address (CPUState *env, target_phys_addr_t *physical, int *prot,
                           target_ulong address, int rw, int access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL)))
            *physical = address + 0x40000000UL;
        else
            *physical = address;
    } else if (address <= (int32_t)0xBFFFFFFFUL)
        *physical = address & 0x1FFFFFFF;
    else
        *physical = address;

    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}

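/*
 * Each R4000-style TLB entry maps a pair of consecutive virtual pages onto
 * two independent physical pages.  In the lookup below, 'mask' covers the
 * double-page span, 'n' selects the even or odd half from the lowest address
 * bit above the page offset, and that half's V/D bits gate validity and
 * write permission.
 */
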
/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address (CPUState *env, target_phys_addr_t *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    uint8_t ASID = env->CP0_EntryHi & 0xFF;
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    int n;
    int i;

    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = ~(TARGET_PAGE_MASK << 1);
        tag = address & ~mask;
        VPN = tlb->VPN & ~mask;

#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0))
                return TLBRET_INVALID;
            if (rw == 0 || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0)
                    *prot |= PAGE_WRITE;
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

#if !defined(CONFIG_USER_ONLY)
static int get_physical_address (CPUState *env, target_phys_addr_t *physical,
                                 int *prot, target_ulong address,
                                 int rw, int access_type)
{
    /* User mode can only access useg/xuseg */
    int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM;
    int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
#if defined(TARGET_MIPS64)
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;

#if 0
    qemu_log("user mode %d h %08x\n", user_mode, env->hflags);
#endif

    if (address <= (int32_t)0x7FFFFFFFUL) {
        /* useg */
        if (unlikely(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address & 0xFFFFFFFF;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        }
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if (kernel_mode && KX &&
            (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            *physical = address & env->PAMask;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < (int32_t)0xA0000000UL) {
        /* kseg0 */
        if (kernel_mode) {
            *physical = address - (int32_t)0x80000000UL;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)0xC0000000UL) {
        /* kseg1 */
        if (kernel_mode) {
            *physical = address - (int32_t)0xA0000000UL;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)0xE0000000UL) {
        /* sseg (kseg2) */
        if (supervisor_mode || kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        if (kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    }
#if 0
    qemu_log(TARGET_FMT_lx " %d %d => " TARGET_FMT_lx " %d (%d)\n",
             address, rw, access_type, *physical, *prot, ret);
#endif

    return ret;
}
#endif

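/*
 * On a failed translation, prime the CP0 state the guest handler expects:
 * BadVAddr, the VPN2 fields of Context and EntryHi (plus XContext on
 * MIPS64), and finally the exception index / error code that do_interrupt()
 * will deliver.
 */
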
static void raise_mmu_exception(CPUState *env, target_ulong address,
                                int rw, int tlb_error)
{
    int exception = 0, error_code = 0;

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw)
            exception = EXCP_AdES;
        else
            exception = EXCP_AdEL;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw)
            exception = EXCP_TLBS;
        else
            exception = EXCP_TLBL;
        error_code = 1;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw)
            exception = EXCP_TLBS;
        else
            exception = EXCP_TLBL;
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    }
    /* Raise exception */
    env->CP0_BadVAddr = address;
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi =
        (env->CP0_EntryHi & 0xFF) | (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext = (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
                        ((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) |
                        ((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
}

/*
 * Get the pgd_current from the TLB exception handler.
 * The exception handler is generated by function build_r4000_tlb_refill_handler.
 */

static struct {
    target_ulong pgd_current_p;
    int softshift;
} linux_pte_info = {0};

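/*
 * cpu_mips_get_pgd() recovers two pieces of information from the guest
 * Linux TLB refill handler at EBase and caches them in linux_pte_info:
 * the physical location of the kernel's pgd_current pointer, and the
 * right-shift ("softshift") that turns a software PTE into EntryLo format.
 * The handler is matched against the known lui/lw/srl templates below; an
 * unrecognised handler aborts emulation.
 */
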
static inline target_ulong cpu_mips_get_pgd(CPUState *env)
{
    if (unlikely(linux_pte_info.pgd_current_p == 0)) {
        int i;
        uint32_t lui_ins, lw_ins, srl_ins;
        uint32_t address;
        uint32_t ebase;

        /*
         * The exact TLB refill code varies depending on the kernel version
         * and configuration. Examine the TLB handler to extract
         * pgd_current_p and the shift required to convert an in-memory PTE
         * to TLB format.
         */
        static struct {
            struct {
                uint32_t off;
                uint32_t op;
                uint32_t mask;
            } lui, lw, srl;
        } handlers[] = {
            /* 2.6.29+ */
            {
                {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
                {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
                {0x34, 0x001ad182, 0xffffffff}  /* 0x001ad182 : srl k0,k0,0x6 */
            },
            /* 3.4+ */
            {
                {0x00, 0x3c1b0000, 0xffff0000}, /* 0x3c1b803f : lui k1,%hi(pgd_current_p) */
                {0x08, 0x8f7b0000, 0xffff0000}, /* 0x8f7b3000 : lw k1,%lo(k1) */
                {0x34, 0x001ad142, 0xffffffff}  /* 0x001ad142 : srl k0,k0,0x5 */
            }
        };

        ebase = env->CP0_EBase - 0x80000000;

        /* Match the kernel TLB refill exception handler against known code */
        for (i = 0; i < sizeof(handlers)/sizeof(handlers[0]); i++) {
            lui_ins = ldl_phys(ebase + handlers[i].lui.off);
            lw_ins = ldl_phys(ebase + handlers[i].lw.off);
            srl_ins = ldl_phys(ebase + handlers[i].srl.off);
            if (((lui_ins & handlers[i].lui.mask) == handlers[i].lui.op) &&
                ((lw_ins & handlers[i].lw.mask) == handlers[i].lw.op) &&
                ((srl_ins & handlers[i].srl.mask) == handlers[i].srl.op))
                break;
        }
        if (i >= sizeof(handlers)/sizeof(handlers[0])) {
            printf("TLBMiss handler dump:\n");
            for (i = 0; i < 0x80; i += 4)
                printf("0x%08x: 0x%08x\n", ebase + i, ldl_phys(ebase + i));
            cpu_abort(env, "TLBMiss handler signature not recognised\n");
        }

        /* Reconstruct the address loaded by the lui/lw pair (the lw offset is signed) */
        address = (lui_ins & 0xffff) << 16;
        address += (((int32_t)(lw_ins & 0xffff)) << 16) >> 16;
        if (address >= 0x80000000 && address < 0xa0000000)
            address -= 0x80000000;
        else if (address >= 0xa0000000 && address <= 0xc0000000)
            address -= 0xa0000000;
        else
            cpu_abort(env, "pgd_current_p not in KSEG0/KSEG1\n");

        linux_pte_info.pgd_current_p = address;
        linux_pte_info.softshift = (srl_ins >> 6) & 0x1f;
    }

    /* Get pgd_current */
    return ldl_phys(linux_pte_info.pgd_current_p);
}

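/*
 * Fast-path TLB refill done in place of the guest handler: walk the
 * two-level Linux page table rooted at pgd_current, shift the even/odd PTE
 * pair into EntryLo0/EntryLo1, write the TLB via r4k_helper_ptw_tlbrefill()
 * and, when the entry is valid, install the page with tlb_set_page() so the
 * access can complete without taking the guest exception.  The CP0 state
 * used during the walk is saved on entry and restored on exit.
 */
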
static inline int cpu_mips_tlb_refill(CPUState *env, target_ulong address, int rw,
                                      int mmu_idx, int is_softmmu)
{
    int32_t saved_hflags;
    target_ulong saved_badvaddr, saved_entryhi, saved_context;

    target_ulong pgd_addr, pt_addr, index;
    target_ulong fault_addr, ptw_phys;
    target_ulong elo_even, elo_odd;
    uint32_t page_valid;
    int ret;

    saved_badvaddr = env->CP0_BadVAddr;
    saved_context = env->CP0_Context;
    saved_entryhi = env->CP0_EntryHi;
    saved_hflags = env->hflags;

    env->CP0_BadVAddr = address;
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi =
        (env->CP0_EntryHi & 0xFF) | (address & (TARGET_PAGE_MASK << 1));

    env->hflags = MIPS_HFLAG_KM;

    fault_addr = env->CP0_BadVAddr;
    page_valid = 0;

    pgd_addr = cpu_mips_get_pgd(env);
    if (unlikely(!pgd_addr)) {
        /* pgd_addr is not valid, just return. */
        ret = TLBRET_NOMATCH;
        goto out;
    }

    ptw_phys = pgd_addr - (int32_t)0x80000000UL;
    index = (fault_addr >> 22) << 2;
    ptw_phys += index;

    pt_addr = ldl_phys(ptw_phys);

    ptw_phys = pt_addr - (int32_t)0x80000000UL;
    index = (env->CP0_Context >> 1) & 0xff8;
    ptw_phys += index;

    /* get the page table entry */
    elo_even = ldl_phys(ptw_phys);
    elo_odd = ldl_phys(ptw_phys + 4);
    elo_even = elo_even >> linux_pte_info.softshift;
    elo_odd = elo_odd >> linux_pte_info.softshift;
    env->CP0_EntryLo0 = elo_even;
    env->CP0_EntryLo1 = elo_odd;
    /* Done. Refill the TLB */
    r4k_helper_ptw_tlbrefill(env);

    /* Since we know the value of the TLB entry, we can
     * return the TLB lookup value here.
     */

    env->hflags = saved_hflags;

    target_ulong mask = env->CP0_PageMask | ~(TARGET_PAGE_MASK << 1);
    int n = !!(address & mask & ~(mask >> 1));
    /* Check access rights */
    if (!(n ? (elo_odd & 2) != 0 : (elo_even & 2) != 0)) {
        ret = TLBRET_INVALID;
        goto out;
    }

    if (rw == 0 || (n ? (elo_odd & 4) != 0 : (elo_even & 4) != 0)) {
        target_ulong physical = (n ? (elo_odd >> 6) << 12 : (elo_even >> 6) << 12);
        physical |= (address & (mask >> 1));
        int prot = PAGE_READ;
        if (n ? (elo_odd & 4) != 0 : (elo_even & 4) != 0)
            prot |= PAGE_WRITE;

        tlb_set_page(env, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, is_softmmu);
        ret = TLBRET_MATCH;
        goto out;
    }
    ret = TLBRET_DIRTY;

out:
    env->CP0_BadVAddr = saved_badvaddr;
    env->CP0_Context = saved_context;
    env->CP0_EntryHi = saved_entryhi;
    env->hflags = saved_hflags;
    return ret;
}

int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                               int mmu_idx, int is_softmmu)
{
#if !defined(CONFIG_USER_ONLY)
    target_phys_addr_t physical;
    int prot;
#endif
    int exception = 0, error_code = 0;
    int access_type;
    int ret = 0;

#if 0
    log_cpu_state(env, 0);
#endif
    qemu_log("%s pc " TARGET_FMT_lx " ad " TARGET_FMT_lx " rw %d mmu_idx %d smmu %d\n",
             __func__, env->active_tc.PC, address, rw, mmu_idx, is_softmmu);

    rw &= 1;

    /* data access */
    /* XXX: put correct access by using cpu_restore_state()
       correctly */
    access_type = ACCESS_INT;
#if defined(CONFIG_USER_ONLY)
    ret = TLBRET_NOMATCH;
#else
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type);
    qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
             __func__, address, ret, physical, prot);
    if (ret == TLBRET_MATCH) {
        ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
                           physical & TARGET_PAGE_MASK, prot,
                           mmu_idx, is_softmmu);
    } else if (ret == TLBRET_NOMATCH) {
        ret = cpu_mips_tlb_refill(env, address, rw, mmu_idx, is_softmmu);
    }
    if (ret < 0)
#endif
    {
        raise_mmu_exception(env, address, rw, ret);
        ret = 1;
    }

    return ret;
}

#if !defined(CONFIG_USER_ONLY)
target_phys_addr_t cpu_mips_translate_address(CPUState *env, target_ulong address, int rw)
{
    target_phys_addr_t physical;
    int prot;
    int access_type;
    int ret = 0;

    rw &= 1;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type);
    /* The original condition used '||', which is always true; use '&&' to
       match the check in cpu_get_phys_page_debug() below. */
    if (ret != TLBRET_MATCH && ret != TLBRET_DIRTY) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}
#endif

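/*
 * Debug translation (used e.g. by the gdb stub): translate without raising
 * guest exceptions.  If the address does not hit in the emulated TLB, fall
 * back to walking the Linux page table the same way the refill path does,
 * returning -1 when no valid mapping exists.
 */
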
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
#if defined(CONFIG_USER_ONLY)
    return addr;
#else
    target_phys_addr_t phys_addr;
    int prot, ret;

    ret = get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT);
    if (ret != TLBRET_MATCH && ret != TLBRET_DIRTY) {
        target_ulong pgd_addr = cpu_mips_get_pgd(env);

        if (unlikely(!pgd_addr)) {
            phys_addr = -1;
        } else {
            target_ulong pgd_phys, pgd_index;
            target_ulong pt_addr, pt_phys, pt_index;
            target_ulong lo;
            /* Mimic the steps taken for a TLB refill */
            pgd_phys = pgd_addr - (int32_t)0x80000000UL;
            pgd_index = (addr >> 22) << 2;
            pt_addr = ldl_phys(pgd_phys + pgd_index);
            pt_phys = pt_addr - (int32_t)0x80000000UL;
            pt_index = (((addr >> 9) & 0x007ffff0) >> 1) & 0xff8;
            /* get the entrylo value */
            if (addr & 0x1000)
                lo = ldl_phys(pt_phys + pt_index + 4);
            else
                lo = ldl_phys(pt_phys + pt_index);
            /* convert software TLB entry to hardware value */
            lo >>= linux_pte_info.softshift;
            if (lo & 0x00000002)
                /* It is valid */
                phys_addr = (lo >> 6) << 12;
            else
                phys_addr = -1;
        }
    }
    return phys_addr;
#endif
}

static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
};

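/*
 * Deliver the pending exception: debug exceptions switch to Debug Mode and
 * vector to 0xBFC00480, reset/soft reset/NMI use ErrorEPC and the reset
 * vector, and all architectural exceptions fall through to the common
 * set_EPC path, which picks the general exception vector and cause code.
 */
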
void do_interrupt (CPUState *env)
{
#if !defined(CONFIG_USER_ONLY)
    target_ulong offset;
    int cause = -1;
    const char *name;

    if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
        if (env->exception_index < 0 || env->exception_index > EXCP_LAST)
            name = "unknown";
        else
            name = excp_names[env->exception_index];

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (env->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM))
        env->exception_index = EXCP_DINT;
    offset = 0x180;
    switch (env->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /* Debug single step cannot be raised inside a delay slot and
           resume will always occur on the next instruction
           (but we assume the pc has always been updated during
           code translation). */
        env->CP0_DEPC = env->active_tc.PC;
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        if (env->hflags & MIPS_HFLAG_BMASK) {
            /* If the exception was raised from a delay slot,
               come back to the jump. */
            env->CP0_DEPC = env->active_tc.PC - 4;
            env->hflags &= ~MIPS_HFLAG_BMASK;
        } else {
            env->CP0_DEPC = env->active_tc.PC;
        }
    enter_debug_mode:
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_64 | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1 << CP0Ca_BD);
        env->active_tc.PC = (int32_t)0xBFC00480;
        break;
    case EXCP_RESET:
        cpu_reset(env);
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(*env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
    set_error_EPC:
        if (env->hflags & MIPS_HFLAG_BMASK) {
            /* If the exception was raised from a delay slot,
               come back to the jump. */
            env->CP0_ErrorEPC = env->active_tc.PC - 4;
            env->hflags &= ~MIPS_HFLAG_BMASK;
        } else {
            env->CP0_ErrorEPC = env->active_tc.PC;
        }
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1 << CP0Ca_BD);
        env->active_tc.PC = (int32_t)0xBFC00000;
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV))
            offset = 0x200;
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        if (env->error_code == 1 && !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX))
                offset = 0x080;
            else
#endif
                offset = 0x000;
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        if (env->error_code == 1 && !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX))
                offset = 0x080;
            else
#endif
                offset = 0x000;
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            offset = 0x100;
        } else {
            offset = 0x20000100;
        }
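    /*
     * Common dispatch for architectural exceptions: EPC and Cause.BD are
     * only updated when Status.EXL was clear (a nested exception keeps the
     * original EPC), then the handler base is the boot vector 0xBFC00200
     * when Status.BEV is set, otherwise EBase, plus the offset chosen above.
     */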
    set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            if (env->hflags & MIPS_HFLAG_BMASK) {
                /* If the exception was raised from a delay slot,
                   come back to the jump. */
                env->CP0_EPC = env->active_tc.PC - 4;
                env->CP0_Cause |= (1 << CP0Ca_BD);
            } else {
                env->CP0_EPC = env->active_tc.PC;
                env->CP0_Cause &= ~(1 << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = (int32_t)0xBFC00200;
        } else {
            env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff);
        }
        env->active_tc.PC += offset;
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
        break;
    default:
        qemu_log("Invalid MIPS exception %d. Exiting\n", env->exception_index);
        printf("Invalid MIPS exception %d. Exiting\n", env->exception_index);
        exit(1);
    }
    if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    env->exception_index = EXCP_NONE;
}

void r4k_invalidate_tlb (CPUState *env, int idx)
{
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint8_t ASID = env->CP0_EntryHi & 0xFF;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again. */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page (env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page (env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}