/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* return non-zero if error */
static inline int load_segment(CPUX86State *env,
                               uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
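/* e1/e2 are the low and high descriptor words.  For example, a flat
   4 GiB code segment (e1 = 0x0000ffff, e2 = 0x00cf9a00) decodes as:
     base  = e1[31:16] | e2[7:0] << 16 | e2[31:24]   -> 0x00000000
     limit = e1[15:0]  | e2[19:16]                   -> 0xfffff
     G bit set, so limit = (0xfffff << 12) | 0xfff   -> 0xffffffff */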
/* init the segment cache in vm86 mode */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(CPUX86State *env,
                                       uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
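/* The index computed above selects the privileged stack slot for the
   target DPL: in a 32-bit TSS, ESPn lives at offset 4 + 8 * n and SSn
   at 8 + 8 * n; in a 16-bit TSS, SPn is at 2 + 4 * n and SSn at
   4 + 4 * n.  (dpl * 4 + 2) << shift yields exactly these offsets. */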
/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0)
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env,
                       int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(env, &e1, &e2, tss_selector) != 0)
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;
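    /* TSS layouts used below -- 32-bit: CR3 at 0x1c, EIP at 0x20,
       EFLAGS at 0x24, general registers from 0x28, segment selectors
       from 0x48, LDT selector at 0x60; 16-bit: IP at 0x0e, FLAGS at
       0x10, registers from 0x12, with no CR3 and no FS/GS. */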
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), EAX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), ECX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), EDX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), EBX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), ESP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), EBP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), ESI);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), EAX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), ECX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), EDX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), EBX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), ESP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), EBP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), ESI);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
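    /* Busy-bit bookkeeping above mirrors the architecture: JMP and
       IRET mark the outgoing TSS available again, JMP and CALL mark
       the incoming one busy, and CALL additionally stored the back
       link and set NT so a later IRET can unwind to this task. */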
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(env, i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
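/* exceptions that push an error code: #DF (8), #TS (10), #NP (11),
   #SS (12), #GP (13), #PF (14) and #AC (17) */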
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
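/* Protected-mode interrupt delivery below follows the architectural
   sequence: fetch the 8-byte gate from the IDT, check the gate type
   and (for software ints) its DPL against CPL, then either switch
   tasks through a task gate or load CS:EIP from the gate, switching
   to the inner-privilege stack named in the TSS when the target code
   segment is more privileged. */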
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env,
                                   int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                cpu_stl_kernel(env, ssp, error_code);
            else
                cpu_stw_kernel(env, ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(env, EXCP0D_GPF, 0);

    if (load_segment(env, &e1, &e2, selector) != 0)
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
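    /* The frame pushed on the target stack, from higher to lower
       addresses: [GS FS DS ES] (vm86 only), [SS ESP] (stack switch
       only), EFLAGS, CS, EIP, [error code], each slot 2 or 4 bytes
       wide according to the gate size. */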
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    cpu_stq_kernel(env, sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = cpu_ldq_kernel(env, sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    return cpu_ldq_kernel(env, env->tr.base + index);
}
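/* In the 64-bit TSS, RSP0-RSP2 occupy offsets 0x04-0x1b and IST1-IST7
   follow from 0x24, so 8 * level + 4 addresses RSPn for level 0-2 and
   ISTn for level n + 3 as used by the callers below. */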
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env,
                           int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(env, EXCP0D_GPF, 0);

    if (load_segment(env, &e1, &e2, selector) != 0)
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(env, ist + 3);
        else
            esp = get_rsp_from_tss(env, dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(env, ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
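/* SYSCALL/SYSRET derive their flat selectors from MSR_STAR: bits 47:32
   give the kernel CS used by SYSCALL above (SS = CS + 8), bits 63:48
   the user CS base used by SYSRET below (SS = CS + 8 there as well,
   with the 64-bit user CS at +16). */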
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
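/* In real mode the IDT is the classic IVT: 4 bytes per vector holding
   the offset word then the segment word, and the frame pushed on the
   stack is FLAGS, CS, IP. */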
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env,
                              int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env,
                              int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#else

static void handle_event_inj(CPUX86State *env,
                             int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(CPUX86State *env,
                             int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(ENV_GET_CPU(env), X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(env, intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(env, intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUX86State *env)
{
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env,
                      env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env,
                     env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env, intno, 0, 0, 0, is_hw);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
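/* helper for ENTER with a non-zero nesting level: copy the level - 1
   saved frame pointers of the enclosing frames down to the new frame,
   then push t1, the new frame pointer value; data32 (or data64 in the
   64-bit variant) selects the operand size. */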
void helper_enter_level(CPUX86State *env,
                        int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env,
                          int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
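/* In long mode, LDT and TSS descriptors grow to 16 bytes (bits 63:32
   of the base live in the third descriptor word), hence entry_limit
   of 15 rather than 7 above and in helper_ltr below. */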
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(env, EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
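/* Privilege rules applied by the far control transfers below: a
   conforming code segment (DESC_C) is reachable whenever DPL <= CPL
   and execution continues at the caller's CPL; a non-conforming one
   requires RPL <= CPL and DPL == CPL for a direct jump or call. */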
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env,
                           int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(env, &e1, &e2, new_cs) != 0)
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(env, &e1, &e2, gate_cs) != 0)
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(env, EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env,
                       int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env,
                            int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(env, &e1, &e2, new_cs) != 0)
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
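        /* A call gate supplies the target itself: the CS selector sits
           in e1[31:16], the entry offset in e1[15:0] (extended by
           e2[31:16] for 386 gates) and e2[4:0] holds the number of
           parameter words to copy on a stack switch. */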
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(env, EXCP0D_GPF, 0);

        if (load_segment(env, &e1, &e2, selector) != 0)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
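/* After a privilege-raising gate call, the new stack thus holds the
   caller's SS:ESP, the copied parameters and the return CS:EIP on
   top; CPL, CS.RPL and SS.RPL all now equal the target DPL. */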
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
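/* helper_ret_protected pops EIP, CS and (for IRET) EFLAGS, then, on a
   privilege change, ESP and SS as well; addend skips the far-call
   parameters that a RET n discards from the stack. */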
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env,
                                        int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(env, &e1, &e2, new_cs) != 0)
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

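    /*
     * iret to vm86 mode: the EFLAGS image popped above had VM set, so
     * the rest of the 32-bit frame holds, in pop order:
     * ESP, SS, ES, DS, FS, GS.  The selectors are reloaded real-mode
     * style (base = selector << 4) and CPL is forced to 3.
     */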
 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(env, EXCP0D_GPF, 0);
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(env, &e1, &e2, tss_selector) != 0)
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
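
/*
 * SYSENTER/SYSEXIT never read the GDT: all selectors are derived from
 * the SYSENTER_CS MSR and flat 4 GB segments are forged directly,
 * following the layout used by helper_sysenter() above and
 * helper_sysexit() below:
 *
 *   ring 0 CS = sysenter_cs        ring 3 CS (legacy) = sysenter_cs + 16
 *   ring 0 SS = sysenter_cs + 8    ring 3 SS (legacy) = sysenter_cs + 24
 *                                  ring 3 CS (64-bit) = sysenter_cs + 32
 *                                  ring 3 SS (64-bit) = sysenter_cs + 40
 */
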
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(env, &e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(env, &e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
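
/*
 * helper_lsl()/helper_lar() above and helper_verr()/helper_verw() below
 * all report success through ZF: CC_SRC is rebuilt from the current
 * flags with CC_Z set on success and cleared on failure.  LAR keeps
 * e2 & 0x00f0ff00, i.e. the access-rights byte (type, S, DPL, P) plus
 * the AVL/L/D/G bits of the descriptor.
 */
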
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(env, &e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(env, &e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
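
/*
 * Guest-visible behaviour of helper_verr()/helper_verw(), e.g. for VERW
 * (a minimal usage sketch from the guest's point of view):
 *
 *     verw %ax            ; ZF := 1 iff AX selects a data segment
 *     jz   seg_writable   ; writable at the current CPL/RPL
 */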