Lines Matching refs:env
70 uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
77 if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
82 while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
104 cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
119 uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
130 int kvm_arch_init_vcpu(CPUState *env)
139 env->mp_state = KVM_MP_STATE_RUNNABLE;
143 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
156 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
163 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
174 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
189 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
193 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
200 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
205 return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
208 static int kvm_has_msr_star(CPUState *env)
222 ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
230 ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
330 static int kvm_getput_regs(CPUState *env, int set)
336 ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
341 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
342 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
343 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
344 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
345 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
346 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
347 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
348 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
350 kvm_getput_reg(&regs.r8, &env->regs[8], set);
351 kvm_getput_reg(&regs.r9, &env->regs[9], set);
352 kvm_getput_reg(&regs.r10, &env->regs[10], set);
353 kvm_getput_reg(&regs.r11, &env->regs[11], set);
354 kvm_getput_reg(&regs.r12, &env->regs[12], set);
355 kvm_getput_reg(&regs.r13, &env->regs[13], set);
356 kvm_getput_reg(&regs.r14, &env->regs[14], set);
357 kvm_getput_reg(&regs.r15, &env->regs[15], set);
360 kvm_getput_reg(&regs.rflags, &env->eflags, set);
361 kvm_getput_reg(&regs.rip, &env->eip, set);
364 ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
369 static int kvm_put_fpu(CPUState *env)
375 fpu.fsw = env->fpus & ~(7 << 11);
376 fpu.fsw |= (env->fpstt & 7) << 11;
377 fpu.fcw = env->fpuc;
379 fpu.ftwx |= (!env->fptags[i]) << i;
380 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
381 memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
382 fpu.mxcsr = env->mxcsr;
384 return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
387 static int kvm_put_sregs(CPUState *env)
392 env->interrupt_bitmap,
395 if ((env->eflags & VM_MASK)) {
396 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
397 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
398 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
399 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
400 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
401 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
403 set_seg(&sregs.cs, &env->segs[R_CS]);
404 set_seg(&sregs.ds, &env->segs[R_DS]);
405 set_seg(&sregs.es, &env->segs[R_ES]);
406 set_seg(&sregs.fs, &env->segs[R_FS]);
407 set_seg(&sregs.gs, &env->segs[R_GS]);
408 set_seg(&sregs.ss, &env->segs[R_SS]);
410 if (env->cr[0] & CR0_PE_MASK) {
418 set_seg(&sregs.tr, &env->tr);
419 set_seg(&sregs.ldt, &env->ldt);
421 sregs.idt.limit = env->idt.limit;
422 sregs.idt.base = env->idt.base;
423 sregs.gdt.limit = env->gdt.limit;
424 sregs.gdt.base = env->gdt.base;
426 sregs.cr0 = env->cr[0];
427 sregs.cr2 = env->cr[2];
428 sregs.cr3 = env->cr[3];
429 sregs.cr4 = env->cr[4];
431 sregs.cr8 = cpu_get_apic_tpr(env);
432 sregs.apic_base = cpu_get_apic_base(env);
434 sregs.efer = env->efer;
436 return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
446 static int kvm_put_msrs(CPUState *env)
455 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
456 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
457 kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
458 if (kvm_has_msr_star(env))
459 kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
460 kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
463 kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
464 kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
465 kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
466 kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
470 return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
475 static int kvm_get_fpu(CPUState *env)
480 ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
484 env->fpstt = (fpu.fsw >> 11) & 7;
485 env->fpus = fpu.fsw;
486 env->fpuc = fpu.fcw;
488 env->fptags[i] = !((fpu.ftwx >> i) & 1);
489 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
490 memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
491 env->mxcsr = fpu.mxcsr;
496 int kvm_get_sregs(CPUState *env)
502 ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
506 memcpy(env->interrupt_bitmap,
510 get_seg(&env->segs[R_CS], &sregs.cs);
511 get_seg(&env->segs[R_DS], &sregs.ds);
512 get_seg(&env->segs[R_ES], &sregs.es);
513 get_seg(&env->segs[R_FS], &sregs.fs);
514 get_seg(&env->segs[R_GS], &sregs.gs);
515 get_seg(&env->segs[R_SS], &sregs.ss);
517 get_seg(&env->tr, &sregs.tr);
518 get_seg(&env->ldt, &sregs.ldt);
520 env->idt.limit = sregs.idt.limit;
521 env->idt.base = sregs.idt.base;
522 env->gdt.limit = sregs.gdt.limit;
523 env->gdt.base = sregs.gdt.base;
525 env->cr[0] = sregs.cr0;
526 env->cr[2] = sregs.cr2;
527 env->cr[3] = sregs.cr3;
528 env->cr[4] = sregs.cr4;
530 cpu_set_apic_base(env, sregs.apic_base);
532 env->efer = sregs.efer;
533 //cpu_set_apic_tpr(env, sregs.cr8);
543 hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
544 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
545 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
547 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
548 hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
551 if (env->efer & MSR_EFER_LMA) {
555 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
558 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
560 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
562 if (!(env->cr[0] & CR0_PE_MASK) ||
563 (env->eflags & VM_MASK) ||
567 hflags |= ((env->segs[R_DS].base |
568 env->segs[R_ES].base |
569 env->segs[R_SS].base) != 0) <<
573 env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
578 static int kvm_get_msrs(CPUState *env)
591 if (kvm_has_msr_star(env))
602 ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
609 env->sysenter_cs = msrs[i].data;
612 env->sysenter_esp = msrs[i].data;
615 env->sysenter_eip = msrs[i].data;
618 env->star = msrs[i].data;
622 env->cstar = msrs[i].data;
625 env->kernelgsbase = msrs[i].data;
628 env->fmask = msrs[i].data;
631 env->lstar = msrs[i].data;
635 env->tsc = msrs[i].data;
643 int kvm_arch_put_registers(CPUState *env)
647 ret = kvm_getput_regs(env, 1);
651 ret = kvm_put_fpu(env);
655 ret = kvm_put_sregs(env);
659 ret = kvm_put_msrs(env);
663 ret = kvm_put_mp_state(env);
667 ret = kvm_get_mp_state(env);
674 int kvm_arch_get_registers(CPUState *env)
678 ret = kvm_getput_regs(env, 0);
682 ret = kvm_get_fpu(env);
686 ret = kvm_get_sregs(env);
690 ret = kvm_get_msrs(env);
697 int kvm_arch_vcpu_run(CPUState *env)
701 return no_gs_ioctl(env->kvm_fd, KVM_RUN, 0);
704 return kvm_vcpu_ioctl(env, KVM_RUN, 0);
707 int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
711 (env->interrupt_request & CPU_INTERRUPT_HARD) &&
712 (env->eflags & IF_MASK)) {
715 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
716 irq = cpu_get_pic_interrupt(env);
722 kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
730 if ((env->interrupt_request & CPU_INTERRUPT_HARD))
736 run->cr8 = cpu_get_apic_tpr(env);
745 int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
751 env->eflags |= IF_MASK;
753 env->eflags &= ~IF_MASK;
755 cpu_set_apic_tpr(env, run->cr8);
756 cpu_set_apic_base(env, run->apic_base);
761 static int kvm_handle_halt(CPUState *env)
763 if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
764 (env->eflags & IF_MASK)) &&
765 !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
766 env->halted = 1;
767 env->exception_index = EXCP_HLT;
774 int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
781 ret = kvm_handle_halt(env);
789 int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
793 if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
794 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
799 int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
803 if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
804 cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
931 void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
943 if (kvm_sw_breakpoints_active(env))