/*
 *  x86 misc helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/ioport.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

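/*
 * The offset of the I/O permission bitmap is stored at byte 0x66 of a
 * 32-bit TSS.  Each bit covers one port; an access of `size' bytes is
 * allowed only if every covered bit is clear.  Two bytes are always
 * read so that accesses crossing a byte boundary are handled.
 */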
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

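/* Entry points of each access size (byte, word, long) for the I/O
   permission check above. */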
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}

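/* Unchecked port I/O helpers: forward the access to the ioport layer,
   truncating the data to the operand size. */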
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

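/* INTO: raise the #OF interrupt (vector 4) if the overflow flag is set. */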
void helper_into(CPUX86State *env, int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(env, CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

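/* Raise a single-step trap (#DB).  Outside user-mode emulation, DR6 is
   first updated by check_hw_breakpoints() and the single-step (BS) bit
   is set. */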
void helper_single_step(CPUX86State *env)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}

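/* CPUID: leaf in EAX, sub-leaf in ECX; results are written back to EAX,
   EBX, ECX and EDX. */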
void helper_cpuid(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

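/* In user-mode emulation there is no system state to maintain: control
   register reads return 0 and control/debug register writes are ignored. */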
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    return 0;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
{
}
#else
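/* System emulation: CR8 accesses go to the APIC TPR unless SVM virtual
   interrupt masking (HF2_VINTR_MASK) is active, in which case the v_tpr
   shadow is used instead. */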
target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

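/* LMSW funnels through helper_write_crN() so that the SVM CR0 write
   intercept and the cpu_x86_update_cr0() side effects still apply. */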
void helper_lmsw(CPUX86State *env, target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(env, 0, t0);
}

void helper_invlpg(CPUX86State *env, target_ulong addr)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

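/* RDTSC: #GP if CR4.TSD is set and CPL is not 0; otherwise the result of
   cpu_get_tsc() plus the SVM tsc_offset is returned in EDX:EAX. */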
void helper_rdtsc(CPUX86State *env)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(CPUX86State *env)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(env, EXCP06_ILLOP, 0);
}

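/* MSR access helpers: no-ops in user-mode emulation, implemented below
   for system emulation. */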
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(CPUX86State *env)
{
}

void helper_rdmsr(CPUX86State *env)
{
}
#else
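/* WRMSR: MSR index in ECX, value in EDX:EAX.  Writes to unknown MSRs are
   silently ignored (no #GP is raised). */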
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;

    helper_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}

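/* RDMSR: MSR index in ECX, result returned in EDX:EAX.  Unknown MSRs read
   as 0 rather than raising #GP. */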
void helper_rdmsr(CPUX86State *env)
{
    uint64_t val;

    helper_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

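/* HLT: clear the STI interrupt shadow, mark the CPU halted and leave the
   execution loop with EXCP_HLT.  helper_hlt() additionally checks the SVM
   intercept and advances EIP past the instruction first. */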
static void do_hlt(CPUX86State *env)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    ENV_GET_CPU(env)->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(CPUX86State *env, int next_eip_addend)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt(env);
}

void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(env, EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}

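/* MWAIT is only approximated: with a single CPU it behaves like HLT; with
   more than one CPU it returns immediately, since another CPU may need to
   wake this one and no monitored address is tracked. */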
void helper_mwait(CPUX86State *env, int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(env, EXCP0D_GPF);
    helper_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    CPUState *cpu = ENV_GET_CPU(env);
    if (cpu->cpu_index != 0 || QTAILQ_NEXT(cpu, node) != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt(env);
    }
}

void helper_debug(CPUX86State *env)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
    565