      1 /*
      2  *  i386 helpers (without register variable usage)
      3  *
      4  *  Copyright (c) 2003 Fabrice Bellard
      5  *
      6  * This library is free software; you can redistribute it and/or
      7  * modify it under the terms of the GNU Lesser General Public
      8  * License as published by the Free Software Foundation; either
      9  * version 2 of the License, or (at your option) any later version.
     10  *
     11  * This library is distributed in the hope that it will be useful,
     12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14  * Lesser General Public License for more details.
     15  *
     16  * You should have received a copy of the GNU Lesser General Public
     17  * License along with this library; if not, write to the Free Software
     18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
     19  */
     20 #include <stdarg.h>
     21 #include <stdlib.h>
     22 #include <stdio.h>
     23 #include <string.h>
     24 #include <inttypes.h>
     25 #include <signal.h>
     26 
     27 #include "cpu.h"
     28 #include "qemu-common.h"
     29 #include "sysemu/kvm.h"
     30 #include "exec/hax.h"
     31 
     32 //#define DEBUG_MMU
     33 
     34 /* feature flags taken from "Intel Processor Identification and the CPUID
     35  * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     36  * about feature names, the Linux name is used. */
     37 static const char *feature_name[] = {
     38     "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
     39     "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
     40     "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
     41     "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
     42 };
     43 static const char *ext_feature_name[] = {
     44     "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
     45     "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
     46     NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
      47     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
     48 };
     49 static const char *ext2_feature_name[] = {
     50     "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
     51     "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
     52     "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
     53     "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
     54 };
     55 static const char *ext3_feature_name[] = {
     56     "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
     57     "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
     58     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
     59     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
     60 };
     61 
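         /* Look up FLAGNAME in the four CPUID feature-name tables above and set the
          * corresponding bit in the matching bitmap; a warning is printed to stderr
          * if the flag is not known in any table. */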
     62 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
     63                                     uint32_t *ext_features,
     64                                     uint32_t *ext2_features,
     65                                     uint32_t *ext3_features)
     66 {
     67     int i;
     68     int found = 0;
     69 
     70     for ( i = 0 ; i < 32 ; i++ )
     71         if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
     72             *features |= 1 << i;
     73             found = 1;
     74         }
     75     for ( i = 0 ; i < 32 ; i++ )
     76         if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
     77             *ext_features |= 1 << i;
     78             found = 1;
     79         }
     80     for ( i = 0 ; i < 32 ; i++ )
     81         if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
     82             *ext2_features |= 1 << i;
     83             found = 1;
     84         }
     85     for ( i = 0 ; i < 32 ; i++ )
     86         if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
     87             *ext3_features |= 1 << i;
     88             found = 1;
     89         }
     90     if (!found) {
     91         fprintf(stderr, "CPU feature %s not found\n", flagname);
     92     }
     93 }
     94 
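         /* Clear from *FEATURES any bits that are absent from the SUPPORTED mask
          * (e.g. features the host kernel/KVM cannot expose).  The NAMES table is
          * accepted for diagnostics but is not used in this version. */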
     95 static void kvm_trim_features(uint32_t *features, uint32_t supported,
     96                               const char *names[])
     97 {
     98     int i;
     99     uint32_t mask;
    100 
    101     for (i = 0; i < 32; ++i) {
    102         mask = 1U << i;
    103         if ((*features & mask) && !(supported & mask)) {
    104             *features &= ~mask;
    105         }
    106     }
    107 }
    108 
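         /* Template for one built-in CPU model: CPUID vendor/level/xlevel,
          * family/model/stepping, the four feature-word bitmaps and the brand
          * string reported through CPUID leaves 0x80000002-0x80000004.  Instances
          * live in the x86_defs[] table below. */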
    109 typedef struct x86_def_t {
    110     const char *name;
    111     uint32_t level;
    112     uint32_t vendor1, vendor2, vendor3;
    113     int family;
    114     int model;
    115     int stepping;
    116     uint32_t features, ext_features, ext2_features, ext3_features;
    117     uint32_t xlevel;
    118     char model_id[48];
    119     int vendor_override;
    120 } x86_def_t;
    121 
    122 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
    123 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
    124           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
    125 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
    126           CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
    127           CPUID_PSE36 | CPUID_FXSR)
    128 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
    129 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
    130           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
    131           CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
    132           CPUID_PAE | CPUID_SEP | CPUID_APIC)
    133 static x86_def_t x86_defs[] = {
    134 #ifdef TARGET_X86_64
    135     {
    136         .name = "qemu64",
    137         .level = 2,
    138         .vendor1 = CPUID_VENDOR_AMD_1,
    139         .vendor2 = CPUID_VENDOR_AMD_2,
    140         .vendor3 = CPUID_VENDOR_AMD_3,
    141         .family = 6,
    142         .model = 2,
    143         .stepping = 3,
    144         .features = PPRO_FEATURES |
    145         /* these features are needed for Win64 and aren't fully implemented */
    146             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
    147         /* this feature is needed for Solaris and isn't fully implemented */
    148             CPUID_PSE36,
    149         .ext_features = CPUID_EXT_SSE3,
    150         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
    151             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
    152             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
    153         .ext3_features = CPUID_EXT3_SVM,
    154         .xlevel = 0x8000000A,
    155         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    156     },
    157     {
    158         .name = "phenom",
    159         .level = 5,
    160         .vendor1 = CPUID_VENDOR_AMD_1,
    161         .vendor2 = CPUID_VENDOR_AMD_2,
    162         .vendor3 = CPUID_VENDOR_AMD_3,
    163         .family = 16,
    164         .model = 2,
    165         .stepping = 3,
    166         /* Missing: CPUID_VME, CPUID_HT */
    167         .features = PPRO_FEATURES |
    168             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
    169             CPUID_PSE36,
    170         /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
    171         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
    172         /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
    173         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
    174             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
    175             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
    176             CPUID_EXT2_FFXSR,
    177         /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
    178                     CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
    179                     CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
    180                     CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
    181         .ext3_features = CPUID_EXT3_SVM,
    182         .xlevel = 0x8000001A,
    183         .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    184     },
    185     {
    186         .name = "core2duo",
    187         .level = 10,
    188         .family = 6,
    189         .model = 15,
    190         .stepping = 11,
     191         /* The original CPU also implements these features:
    192                CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
    193                CPUID_TM, CPUID_PBE */
    194         .features = PPRO_FEATURES |
    195             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
    196             CPUID_PSE36,
     197         /* The original CPU also implements these ext features:
    198                CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
    199                CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
    200         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
    201         .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
    202         /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
    203         .xlevel = 0x80000008,
    204         .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    205     },
    206 #endif
    207     {
    208         .name = "qemu32",
    209         .level = 2,
    210         .family = 6,
    211         .model = 3,
    212         .stepping = 3,
    213         .features = PPRO_FEATURES,
    214         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_SSSE3,
    215         .xlevel = 0,
    216         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    217     },
    218     {
    219         .name = "coreduo",
    220         .level = 10,
    221         .family = 6,
    222         .model = 14,
    223         .stepping = 8,
    224         /* The original CPU also implements these features:
    225                CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
    226                CPUID_TM, CPUID_PBE */
    227         .features = PPRO_FEATURES | CPUID_VME |
    228             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
    229         /* The original CPU also implements these ext features:
    230                CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
    231                CPUID_EXT_PDCM */
    232         .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
    233         .ext2_features = CPUID_EXT2_NX,
    234         .xlevel = 0x80000008,
    235         .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    236     },
    237     {
    238         .name = "486",
    239         .level = 0,
    240         .family = 4,
    241         .model = 0,
    242         .stepping = 0,
    243         .features = I486_FEATURES,
    244         .xlevel = 0,
    245     },
    246     {
    247         .name = "pentium",
    248         .level = 1,
    249         .family = 5,
    250         .model = 4,
    251         .stepping = 3,
    252         .features = PENTIUM_FEATURES,
    253         .xlevel = 0,
    254     },
    255     {
    256         .name = "pentium2",
    257         .level = 2,
    258         .family = 6,
    259         .model = 5,
    260         .stepping = 2,
    261         .features = PENTIUM2_FEATURES,
    262         .xlevel = 0,
    263     },
    264     {
    265         .name = "pentium3",
    266         .level = 2,
    267         .family = 6,
    268         .model = 7,
    269         .stepping = 3,
    270         .features = PENTIUM3_FEATURES,
    271         .xlevel = 0,
    272     },
    273     {
    274         .name = "athlon",
    275         .level = 2,
    276         .vendor1 = 0x68747541, /* "Auth" */
    277         .vendor2 = 0x69746e65, /* "enti" */
    278         .vendor3 = 0x444d4163, /* "cAMD" */
    279         .family = 6,
    280         .model = 2,
    281         .stepping = 3,
    282         .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
    283         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
    284         .xlevel = 0x80000008,
     285         /* XXX: use a different model_id string? */
    286         .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    287     },
    288     {
    289         .name = "n270",
     290         /* the original CPU reports CPUID level 10 */
    291         .level = 5,
    292         .family = 6,
    293         .model = 28,
    294         .stepping = 2,
    295         .features = PPRO_FEATURES |
    296             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
    297             /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
    298              * CPUID_HT | CPUID_TM | CPUID_PBE */
     299             /* Some CPUs lack CPUID_SEP */
    300         .ext_features = CPUID_EXT_MONITOR |
    301             CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
    302             /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
    303              * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
    304         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
    305         /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
    306         .xlevel = 0x8000000A,
    307         .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    308     },
    309 };
    310 
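         /* Parse a "-cpu" model string of the form
          *     name[,+feature][,-feature][,key=value]...
          * (e.g. "qemu64,+popcnt,-nx,family=15") into *x86_cpu_def, starting from
          * the named entry in x86_defs[].  Returns 0 on success, -1 on error. */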
    311 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
    312 {
    313     unsigned int i;
    314     x86_def_t *def;
    315 
    316     char *s = strdup(cpu_model);
    317     char *featurestr, *name = strtok(s, ",");
    318     uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    319     uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    320     int family = -1, model = -1, stepping = -1;
    321 
    322     def = NULL;
    323     for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
    324         if (strcmp(name, x86_defs[i].name) == 0) {
    325             def = &x86_defs[i];
    326             break;
    327         }
    328     }
    329     if (!def)
    330         goto error;
    331     memcpy(x86_cpu_def, def, sizeof(*def));
    332 
    333     featurestr = strtok(NULL, ",");
    334 
    335     while (featurestr) {
    336         char *val;
    337         if (featurestr[0] == '+') {
    338             add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
    339         } else if (featurestr[0] == '-') {
    340             add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
    341         } else if ((val = strchr(featurestr, '='))) {
    342             *val = 0; val++;
    343             if (!strcmp(featurestr, "family")) {
    344                 char *err;
    345                 family = strtol(val, &err, 10);
    346                 if (!*val || *err || family < 0) {
    347                     fprintf(stderr, "bad numerical value %s\n", val);
    348                     goto error;
    349                 }
    350                 x86_cpu_def->family = family;
    351             } else if (!strcmp(featurestr, "model")) {
    352                 char *err;
    353                 model = strtol(val, &err, 10);
    354                 if (!*val || *err || model < 0 || model > 0xff) {
    355                     fprintf(stderr, "bad numerical value %s\n", val);
    356                     goto error;
    357                 }
    358                 x86_cpu_def->model = model;
    359             } else if (!strcmp(featurestr, "stepping")) {
    360                 char *err;
    361                 stepping = strtol(val, &err, 10);
    362                 if (!*val || *err || stepping < 0 || stepping > 0xf) {
    363                     fprintf(stderr, "bad numerical value %s\n", val);
    364                     goto error;
    365                 }
    366                 x86_cpu_def->stepping = stepping;
    367             } else if (!strcmp(featurestr, "vendor")) {
    368                 if (strlen(val) != 12) {
    369                     fprintf(stderr, "vendor string must be 12 chars long\n");
    370                     goto error;
    371                 }
    372                 x86_cpu_def->vendor1 = 0;
    373                 x86_cpu_def->vendor2 = 0;
    374                 x86_cpu_def->vendor3 = 0;
    375                 for(i = 0; i < 4; i++) {
    376                     x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
    377                     x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
    378                     x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
    379                 }
    380                 x86_cpu_def->vendor_override = 1;
    381             } else if (!strcmp(featurestr, "model_id")) {
    382                 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
    383                         val);
    384             } else {
    385                 fprintf(stderr, "unrecognized feature %s\n", featurestr);
    386                 goto error;
    387             }
    388         } else {
    389             fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
    390             goto error;
    391         }
    392         featurestr = strtok(NULL, ",");
    393     }
    394     x86_cpu_def->features |= plus_features;
    395     x86_cpu_def->ext_features |= plus_ext_features;
    396     x86_cpu_def->ext2_features |= plus_ext2_features;
    397     x86_cpu_def->ext3_features |= plus_ext3_features;
    398     x86_cpu_def->features &= ~minus_features;
    399     x86_cpu_def->ext_features &= ~minus_ext_features;
    400     x86_cpu_def->ext2_features &= ~minus_ext2_features;
    401     x86_cpu_def->ext3_features &= ~minus_ext3_features;
    402     free(s);
    403     return 0;
    404 
    405 error:
    406     free(s);
    407     return -1;
    408 }
    409 
    410 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
    411 {
    412     unsigned int i;
    413 
    414     for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
    415         (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
    416 }
    417 
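         /* Copy the selected model definition into ENV's cpuid_* fields.  Family,
          * model and stepping are packed into cpuid_version using the CPUID.1 EAX
          * layout (stepping in bits 0-3, model 4-7, family 8-11, extended model
          * 16-19, extended family 20-27); model_id becomes the 48-byte brand
          * string. */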
    418 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
    419 {
    420     x86_def_t def1, *def = &def1;
    421 
    422     if (cpu_x86_find_by_name(def, cpu_model) < 0)
    423         return -1;
    424     if (def->vendor1) {
    425         env->cpuid_vendor1 = def->vendor1;
    426         env->cpuid_vendor2 = def->vendor2;
    427         env->cpuid_vendor3 = def->vendor3;
    428     } else {
    429         env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
    430         env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
    431         env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    432     }
    433     env->cpuid_vendor_override = def->vendor_override;
    434     env->cpuid_level = def->level;
    435     if (def->family > 0x0f)
    436         env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    437     else
    438         env->cpuid_version = def->family << 8;
    439     env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    440     env->cpuid_version |= def->stepping;
    441     env->cpuid_features = def->features;
    442     env->pat = 0x0007040600070406ULL;
    443     env->cpuid_ext_features = def->ext_features;
    444     env->cpuid_ext2_features = def->ext2_features;
    445     env->cpuid_xlevel = def->xlevel;
    446     env->cpuid_ext3_features = def->ext3_features;
    447     {
    448         const char *model_id = def->model_id;
    449         int c, len, i;
    450         if (!model_id)
    451             model_id = "";
    452         len = strlen(model_id);
    453         for(i = 0; i < 48; i++) {
    454             if (i >= len)
    455                 c = '\0';
    456             else
    457                 c = (uint8_t)model_id[i];
    458             env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    459         }
    460     }
    461     return 0;
    462 }
    463 
    464 /* NOTE: must be called outside the CPU execute loop */
    465 void cpu_reset(CPUState *cpu)
    466 {
    467     CPUX86State *env = cpu->env_ptr;
    468     int i;
    469 
    470     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
    471         qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
    472         log_cpu_state(cpu, X86_DUMP_FPU | X86_DUMP_CCOP);
    473     }
    474 
    475     memset(env, 0, offsetof(CPUX86State, breakpoints));
    476 
    477     tlb_flush(env, 1);
    478 
    479     env->old_exception = -1;
    480 
    481     /* init to reset state */
    482 
    483 #ifdef CONFIG_SOFTMMU
    484     env->hflags |= HF_SOFTMMU_MASK;
    485 #endif
    486     env->hflags2 |= HF2_GIF_MASK;
    487 
    488     cpu_x86_update_cr0(env, 0x60000010);
    489     env->a20_mask = ~0x0;
    490     env->smbase = 0x30000;
    491 
    492     env->idt.limit = 0xffff;
    493     env->gdt.limit = 0xffff;
    494     env->ldt.limit = 0xffff;
    495     env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    496     env->tr.limit = 0xffff;
    497     env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
    498 
    499     cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
    500                            DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
    501                            DESC_R_MASK | DESC_A_MASK);
    502     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
    503                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    504                            DESC_A_MASK);
    505     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
    506                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    507                            DESC_A_MASK);
    508     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
    509                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    510                            DESC_A_MASK);
    511     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
    512                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    513                            DESC_A_MASK);
    514     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
    515                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
    516                            DESC_A_MASK);
    517 
    518     env->eip = 0xfff0;
    519     env->regs[R_EDX] = env->cpuid_version;
    520 
    521     env->eflags = 0x2;
    522 
    523     /* FPU init */
    524     for(i = 0;i < 8; i++)
    525         env->fptags[i] = 1;
    526     env->fpuc = 0x37f;
    527 
    528     env->mxcsr = 0x1f80;
    529 
    530     memset(env->dr, 0, sizeof(env->dr));
    531     env->dr[6] = DR6_FIXED_1;
    532     env->dr[7] = DR7_FIXED_1;
    533     cpu_breakpoint_remove_all(env, BP_CPU);
    534     cpu_watchpoint_remove_all(env, BP_CPU);
    535 }
    536 
    537 void cpu_x86_close(CPUX86State *env)
    538 {
    539     g_free(env);
    540 }
    541 
    542 /***********************************************************/
    543 /* x86 debug */
    544 
    545 static const char *cc_op_str[] = {
    546     "DYNAMIC",
    547     "EFLAGS",
    548 
    549     "MULB",
    550     "MULW",
    551     "MULL",
    552     "MULQ",
    553 
    554     "ADDB",
    555     "ADDW",
    556     "ADDL",
    557     "ADDQ",
    558 
    559     "ADCB",
    560     "ADCW",
    561     "ADCL",
    562     "ADCQ",
    563 
    564     "SUBB",
    565     "SUBW",
    566     "SUBL",
    567     "SUBQ",
    568 
    569     "SBBB",
    570     "SBBW",
    571     "SBBL",
    572     "SBBQ",
    573 
    574     "LOGICB",
    575     "LOGICW",
    576     "LOGICL",
    577     "LOGICQ",
    578 
    579     "INCB",
    580     "INCW",
    581     "INCL",
    582     "INCQ",
    583 
    584     "DECB",
    585     "DECW",
    586     "DECL",
    587     "DECQ",
    588 
    589     "SHLB",
    590     "SHLW",
    591     "SHLL",
    592     "SHLQ",
    593 
    594     "SARB",
    595     "SARW",
    596     "SARL",
    597     "SARQ",
    598 };
    599 
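         /* Print one segment/descriptor cache entry: selector, base, limit and
          * flags, plus the decoded DPL and type (code/data attributes, or the
          * system descriptor type name) when the CPU is in protected mode. */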
    600 static void
    601 cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
    602                        int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
    603                        const char *name, struct SegmentCache *sc)
    604 {
    605 #ifdef TARGET_X86_64
    606     if (env->hflags & HF_CS64_MASK) {
    607         cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
    608                     sc->selector, sc->base, sc->limit, sc->flags);
    609     } else
    610 #endif
    611     {
    612         cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
    613                     (uint32_t)sc->base, sc->limit, sc->flags);
    614     }
    615 
    616     if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
    617         goto done;
    618 
    619     cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    620     if (sc->flags & DESC_S_MASK) {
    621         if (sc->flags & DESC_CS_MASK) {
    622             cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
    623                            ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
    624             cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
    625                         (sc->flags & DESC_R_MASK) ? 'R' : '-');
    626         } else {
    627             cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
    628             cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
    629                         (sc->flags & DESC_W_MASK) ? 'W' : '-');
    630         }
    631         cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    632     } else {
    633         static const char *sys_type_name[2][16] = {
    634             { /* 32 bit mode */
    635                 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
    636                 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
    637                 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
    638                 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
    639             },
    640             { /* 64 bit mode */
    641                 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
    642                 "Reserved", "Reserved", "Reserved", "Reserved",
    643                 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
    644                 "Reserved", "IntGate64", "TrapGate64"
    645             }
    646         };
    647         cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
    648                                     [(sc->flags & DESC_TYPE_MASK)
    649                                      >> DESC_TYPE_SHIFT]);
    650     }
    651 done:
    652     cpu_fprintf(f, "\n");
    653 }
    654 
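         /* Dump the architectural state: general registers, EFLAGS, segment
          * caches, GDT/IDT, control and debug registers, and optionally the
          * condition-code helpers and FPU/SSE state depending on FLAGS. */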
    655 void cpu_dump_state(CPUState *cpu, FILE *f,
    656                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
    657                     int flags)
    658 {
    659     CPUX86State *env = cpu->env_ptr;
    660     int eflags, i, nb;
    661     char cc_op_name[32];
    662     static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    663 
    664     if (kvm_enabled())
    665         kvm_arch_get_registers(cpu);
    666 
    667 #ifdef CONFIG_HAX
    668     if (hax_enabled())
    669         hax_arch_get_registers(cpu);
    670 #endif
    671 
    672     eflags = env->eflags;
    673 #ifdef TARGET_X86_64
    674     if (env->hflags & HF_CS64_MASK) {
    675         cpu_fprintf(f,
    676                     "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
    677                     "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
    678                     "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
    679                     "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
    680                     "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
    681                     env->regs[R_EAX],
    682                     env->regs[R_EBX],
    683                     env->regs[R_ECX],
    684                     env->regs[R_EDX],
    685                     env->regs[R_ESI],
    686                     env->regs[R_EDI],
    687                     env->regs[R_EBP],
    688                     env->regs[R_ESP],
    689                     env->regs[8],
    690                     env->regs[9],
    691                     env->regs[10],
    692                     env->regs[11],
    693                     env->regs[12],
    694                     env->regs[13],
    695                     env->regs[14],
    696                     env->regs[15],
    697                     env->eip, eflags,
    698                     eflags & DF_MASK ? 'D' : '-',
    699                     eflags & CC_O ? 'O' : '-',
    700                     eflags & CC_S ? 'S' : '-',
    701                     eflags & CC_Z ? 'Z' : '-',
    702                     eflags & CC_A ? 'A' : '-',
    703                     eflags & CC_P ? 'P' : '-',
    704                     eflags & CC_C ? 'C' : '-',
    705                     env->hflags & HF_CPL_MASK,
    706                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
    707                     (int)(env->a20_mask >> 20) & 1,
    708                     (env->hflags >> HF_SMM_SHIFT) & 1,
    709                     cpu->halted);
    710     } else
    711 #endif
    712     {
    713         cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
    714                     "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
    715                     "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
    716                     (uint32_t)env->regs[R_EAX],
    717                     (uint32_t)env->regs[R_EBX],
    718                     (uint32_t)env->regs[R_ECX],
    719                     (uint32_t)env->regs[R_EDX],
    720                     (uint32_t)env->regs[R_ESI],
    721                     (uint32_t)env->regs[R_EDI],
    722                     (uint32_t)env->regs[R_EBP],
    723                     (uint32_t)env->regs[R_ESP],
    724                     (uint32_t)env->eip, eflags,
    725                     eflags & DF_MASK ? 'D' : '-',
    726                     eflags & CC_O ? 'O' : '-',
    727                     eflags & CC_S ? 'S' : '-',
    728                     eflags & CC_Z ? 'Z' : '-',
    729                     eflags & CC_A ? 'A' : '-',
    730                     eflags & CC_P ? 'P' : '-',
    731                     eflags & CC_C ? 'C' : '-',
    732                     env->hflags & HF_CPL_MASK,
    733                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
    734                     (int)(env->a20_mask >> 20) & 1,
    735                     (env->hflags >> HF_SMM_SHIFT) & 1,
    736                     cpu->halted);
    737     }
    738 
    739     for(i = 0; i < 6; i++) {
    740         cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
    741                                &env->segs[i]);
    742     }
    743     cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    744     cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
    745 
    746 #ifdef TARGET_X86_64
    747     if (env->hflags & HF_LMA_MASK) {
    748         cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
    749                     env->gdt.base, env->gdt.limit);
    750         cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
    751                     env->idt.base, env->idt.limit);
    752         cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
    753                     (uint32_t)env->cr[0],
    754                     env->cr[2],
    755                     env->cr[3],
    756                     (uint32_t)env->cr[4]);
    757         for(i = 0; i < 4; i++)
    758             cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
    759         cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
    760                     env->dr[6], env->dr[7]);
    761     } else
    762 #endif
    763     {
    764         cpu_fprintf(f, "GDT=     %08x %08x\n",
    765                     (uint32_t)env->gdt.base, env->gdt.limit);
    766         cpu_fprintf(f, "IDT=     %08x %08x\n",
    767                     (uint32_t)env->idt.base, env->idt.limit);
    768         cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
    769                     (uint32_t)env->cr[0],
    770                     (uint32_t)env->cr[2],
    771                     (uint32_t)env->cr[3],
    772                     (uint32_t)env->cr[4]);
    773         for(i = 0; i < 4; i++)
     774             cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
     775         cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    776     }
    777     if (flags & X86_DUMP_CCOP) {
    778         if ((unsigned)env->cc_op < CC_OP_NB)
    779             snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
    780         else
    781             snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
    782 #ifdef TARGET_X86_64
    783         if (env->hflags & HF_CS64_MASK) {
    784             cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
    785                         env->cc_src, env->cc_dst,
    786                         cc_op_name);
    787         } else
    788 #endif
    789         {
    790             cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
    791                         (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
    792                         cc_op_name);
    793         }
    794     }
    795     if (flags & X86_DUMP_FPU) {
    796         int fptag;
    797         fptag = 0;
    798         for(i = 0; i < 8; i++) {
    799             fptag |= ((!env->fptags[i]) << i);
    800         }
    801         cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
    802                     env->fpuc,
    803                     (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
    804                     env->fpstt,
    805                     fptag,
    806                     env->mxcsr);
    807         for(i=0;i<8;i++) {
    808             cpu_fprintf(f, "FPR%d=%016" PRIx64,
    809                         i, env->fpregs[i].mmx.q);
    810             if ((i & 1) == 1)
    811                 cpu_fprintf(f, "\n");
    812             else
    813                 cpu_fprintf(f, " ");
    814         }
    815         if (env->hflags & HF_CS64_MASK)
    816             nb = 16;
    817         else
    818             nb = 8;
    819         for(i=0;i<nb;i++) {
    820             cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
    821                         i,
    822                         env->xmm_regs[i].XMM_L(3),
    823                         env->xmm_regs[i].XMM_L(2),
    824                         env->xmm_regs[i].XMM_L(1),
    825                         env->xmm_regs[i].XMM_L(0));
    826             if ((i & 1) == 1)
    827                 cpu_fprintf(f, "\n");
    828             else
    829                 cpu_fprintf(f, " ");
    830         }
    831     }
    832 }
    833 
    834 /***********************************************************/
    835 /* x86 mmu */
    836 /* XXX: add PGE support */
    837 
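         /* Toggle the emulated A20 gate: bit 20 of a20_mask is updated so that
          * physical addresses either wrap at 1MB (gate disabled) or pass through
          * unchanged, and all cached translations are flushed. */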
    838 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
    839 {
    840     a20_state = (a20_state != 0);
    841     if (a20_state != ((env->a20_mask >> 20) & 1)) {
    842 #if defined(DEBUG_MMU)
    843         printf("A20 update: a20=%d\n", a20_state);
    844 #endif
    845         /* if the cpu is currently executing code, we must unlink it and
     846            any potentially executing TBs */
    847         cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_EXITTB);
    848 
    849         /* when a20 is changed, all the MMU mappings are invalid, so
    850            we must flush everything */
    851         tlb_flush(env, 1);
    852         env->a20_mask = (~0x100000) | (a20_state << 20);
    853     }
    854 }
    855 
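         /* Install a new CR0 value: flush the TLB when the paging/protection bits
          * change, handle long-mode activation/deactivation via EFER.LMA, and
          * mirror the PE/MP/EM/TS bits into hflags. */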
    856 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
    857 {
    858     int pe_state;
    859 
    860 #if defined(DEBUG_MMU)
    861     printf("CR0 update: CR0=0x%08x\n", new_cr0);
    862 #endif
    863     if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
    864         (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
    865         tlb_flush(env, 1);
    866     }
    867 
    868 #ifdef TARGET_X86_64
    869     if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
    870         (env->efer & MSR_EFER_LME)) {
     871         /* enter long mode */
    872         /* XXX: generate an exception */
    873         if (!(env->cr[4] & CR4_PAE_MASK))
    874             return;
    875         env->efer |= MSR_EFER_LMA;
    876         env->hflags |= HF_LMA_MASK;
    877     } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
    878                (env->efer & MSR_EFER_LMA)) {
    879         /* exit long mode */
    880         env->efer &= ~MSR_EFER_LMA;
    881         env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
    882         env->eip &= 0xffffffff;
    883     }
    884 #endif
    885     env->cr[0] = new_cr0 | CR0_ET_MASK;
    886 
    887     /* update PE flag in hidden flags */
    888     pe_state = (env->cr[0] & CR0_PE_MASK);
    889     env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    890     /* ensure that ADDSEG is always set in real mode */
    891     env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    892     /* update FPU flags */
    893     env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
    894         ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    895 }
    896 
    897 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
    898    the PDPT */
    899 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
    900 {
    901     env->cr[3] = new_cr3;
    902     if (env->cr[0] & CR0_PG_MASK) {
    903 #if defined(DEBUG_MMU)
    904         printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
    905 #endif
    906         tlb_flush(env, 0);
    907     }
    908 }
    909 
    910 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
    911 {
    912 #if defined(DEBUG_MMU)
     913     printf("CR4 update: CR4=%08x\n", new_cr4);
    914 #endif
    915     if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
    916         (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
    917         tlb_flush(env, 1);
    918     }
    919     /* SSE handling */
    920     if (!(env->cpuid_features & CPUID_SSE))
    921         new_cr4 &= ~CR4_OSFXSR_MASK;
    922     if (new_cr4 & CR4_OSFXSR_MASK)
    923         env->hflags |= HF_OSFXSR_MASK;
    924     else
    925         env->hflags &= ~HF_OSFXSR_MASK;
    926 
    927     env->cr[4] = new_cr4;
    928 }
    929 
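         /* In user-mode emulation there is no guest paging: a fault is simply
          * reported back as a page fault at the faulting address.  The full
          * page-table walker below is only built for system emulation. */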
    930 #if defined(CONFIG_USER_ONLY)
    931 
    932 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
    933                              int is_write, int mmu_idx)
    934 {
    935     /* user mode only emulation */
    936     is_write &= 1;
    937     env->cr[2] = addr;
    938     env->error_code = (is_write << PG_ERROR_W_BIT);
    939     env->error_code |= PG_ERROR_U_MASK;
    940     env->exception_index = EXCP0E_PAGE;
    941     return 1;
    942 }
    943 
    944 hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
    945 {
    946     return addr;
    947 }
    948 
    949 #else
    950 
     951 /* XXX: this mask should match the physical address width returned by CPUID
     952  * and the one used in exec.c */
    953 #if defined(TARGET_X86_64)
    954 # define PHYS_ADDR_MASK 0xfffffff000LL
    955 #else
    956 # define PHYS_ADDR_MASK 0xffffff000LL
    957 #endif
    958 
    959 /* return value:
    960    -1 = cannot handle fault
    961    0  = nothing more to do
    962    1  = generate PF fault
    963 */
    964 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
    965                              int is_write1, int mmu_idx)
    966 {
    967     uint64_t ptep, pte;
    968     target_ulong pde_addr, pte_addr;
    969     int error_code, is_dirty, prot, page_size, is_write, is_user;
    970     hwaddr paddr;
    971     uint32_t page_offset;
    972     target_ulong vaddr, virt_addr;
    973 
    974     is_user = mmu_idx == MMU_USER_IDX;
    975 #if defined(DEBUG_MMU)
    976     printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
    977            addr, is_write1, is_user, env->eip);
    978 #endif
    979     is_write = is_write1 & 1;
    980 
    981     if (!(env->cr[0] & CR0_PG_MASK)) {
    982         pte = addr;
    983         virt_addr = addr & TARGET_PAGE_MASK;
    984         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    985         page_size = 4096;
    986         goto do_mapping;
    987     }
    988 
    989     if (env->cr[4] & CR4_PAE_MASK) {
    990         uint64_t pde, pdpe;
    991         target_ulong pdpe_addr;
    992 
    993 #ifdef TARGET_X86_64
    994         if (env->hflags & HF_LMA_MASK) {
    995             uint64_t pml4e_addr, pml4e;
    996             int32_t sext;
    997 
    998             /* test virtual address sign extension */
    999             sext = (int64_t)addr >> 47;
   1000             if (sext != 0 && sext != -1) {
   1001                 env->error_code = 0;
   1002                 env->exception_index = EXCP0D_GPF;
   1003                 return 1;
   1004             }
   1005 
   1006             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
   1007                 env->a20_mask;
   1008             pml4e = ldq_phys(pml4e_addr);
   1009             if (!(pml4e & PG_PRESENT_MASK)) {
   1010                 error_code = 0;
   1011                 goto do_fault;
   1012             }
   1013             if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
   1014                 error_code = PG_ERROR_RSVD_MASK;
   1015                 goto do_fault;
   1016             }
   1017             if (!(pml4e & PG_ACCESSED_MASK)) {
   1018                 pml4e |= PG_ACCESSED_MASK;
   1019                 stl_phys_notdirty(pml4e_addr, pml4e);
   1020             }
   1021             ptep = pml4e ^ PG_NX_MASK;
   1022             pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
   1023                 env->a20_mask;
   1024             pdpe = ldq_phys(pdpe_addr);
   1025             if (!(pdpe & PG_PRESENT_MASK)) {
   1026                 error_code = 0;
   1027                 goto do_fault;
   1028             }
   1029             if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
   1030                 error_code = PG_ERROR_RSVD_MASK;
   1031                 goto do_fault;
   1032             }
   1033             ptep &= pdpe ^ PG_NX_MASK;
   1034             if (!(pdpe & PG_ACCESSED_MASK)) {
   1035                 pdpe |= PG_ACCESSED_MASK;
   1036                 stl_phys_notdirty(pdpe_addr, pdpe);
   1037             }
   1038         } else
   1039 #endif
   1040         {
    1041             /* XXX: load the PDPTEs when CR3 is loaded? */
   1042             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
   1043                 env->a20_mask;
   1044             pdpe = ldq_phys(pdpe_addr);
   1045             if (!(pdpe & PG_PRESENT_MASK)) {
   1046                 error_code = 0;
   1047                 goto do_fault;
   1048             }
   1049             ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
   1050         }
   1051 
   1052         pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
   1053             env->a20_mask;
   1054         pde = ldq_phys(pde_addr);
   1055         if (!(pde & PG_PRESENT_MASK)) {
   1056             error_code = 0;
   1057             goto do_fault;
   1058         }
   1059         if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
   1060             error_code = PG_ERROR_RSVD_MASK;
   1061             goto do_fault;
   1062         }
   1063         ptep &= pde ^ PG_NX_MASK;
   1064         if (pde & PG_PSE_MASK) {
   1065             /* 2 MB page */
   1066             page_size = 2048 * 1024;
   1067             ptep ^= PG_NX_MASK;
   1068             if ((ptep & PG_NX_MASK) && is_write1 == 2)
   1069                 goto do_fault_protect;
   1070             if (is_user) {
   1071                 if (!(ptep & PG_USER_MASK))
   1072                     goto do_fault_protect;
   1073                 if (is_write && !(ptep & PG_RW_MASK))
   1074                     goto do_fault_protect;
   1075             } else {
   1076                 if ((env->cr[0] & CR0_WP_MASK) &&
   1077                     is_write && !(ptep & PG_RW_MASK))
   1078                     goto do_fault_protect;
   1079             }
   1080             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
   1081             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
   1082                 pde |= PG_ACCESSED_MASK;
   1083                 if (is_dirty)
   1084                     pde |= PG_DIRTY_MASK;
   1085                 stl_phys_notdirty(pde_addr, pde);
   1086             }
   1087             /* align to page_size */
   1088             pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
   1089             virt_addr = addr & ~(page_size - 1);
   1090         } else {
   1091             /* 4 KB page */
   1092             if (!(pde & PG_ACCESSED_MASK)) {
   1093                 pde |= PG_ACCESSED_MASK;
   1094                 stl_phys_notdirty(pde_addr, pde);
   1095             }
   1096             pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
   1097                 env->a20_mask;
   1098             pte = ldq_phys(pte_addr);
   1099             if (!(pte & PG_PRESENT_MASK)) {
   1100                 error_code = 0;
   1101                 goto do_fault;
   1102             }
   1103             if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
   1104                 error_code = PG_ERROR_RSVD_MASK;
   1105                 goto do_fault;
   1106             }
   1107             /* combine pde and pte nx, user and rw protections */
   1108             ptep &= pte ^ PG_NX_MASK;
   1109             ptep ^= PG_NX_MASK;
   1110             if ((ptep & PG_NX_MASK) && is_write1 == 2)
   1111                 goto do_fault_protect;
   1112             if (is_user) {
   1113                 if (!(ptep & PG_USER_MASK))
   1114                     goto do_fault_protect;
   1115                 if (is_write && !(ptep & PG_RW_MASK))
   1116                     goto do_fault_protect;
   1117             } else {
   1118                 if ((env->cr[0] & CR0_WP_MASK) &&
   1119                     is_write && !(ptep & PG_RW_MASK))
   1120                     goto do_fault_protect;
   1121             }
   1122             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
   1123             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
   1124                 pte |= PG_ACCESSED_MASK;
   1125                 if (is_dirty)
   1126                     pte |= PG_DIRTY_MASK;
   1127                 stl_phys_notdirty(pte_addr, pte);
   1128             }
   1129             page_size = 4096;
   1130             virt_addr = addr & ~0xfff;
   1131             pte = pte & (PHYS_ADDR_MASK | 0xfff);
   1132         }
   1133     } else {
   1134         uint32_t pde;
   1135 
   1136         /* page directory entry */
   1137         pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
   1138             env->a20_mask;
   1139         pde = ldl_phys(pde_addr);
   1140         if (!(pde & PG_PRESENT_MASK)) {
   1141             error_code = 0;
   1142             goto do_fault;
   1143         }
   1144         /* if PSE bit is set, then we use a 4MB page */
   1145         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
   1146             page_size = 4096 * 1024;
   1147             if (is_user) {
   1148                 if (!(pde & PG_USER_MASK))
   1149                     goto do_fault_protect;
   1150                 if (is_write && !(pde & PG_RW_MASK))
   1151                     goto do_fault_protect;
   1152             } else {
   1153                 if ((env->cr[0] & CR0_WP_MASK) &&
   1154                     is_write && !(pde & PG_RW_MASK))
   1155                     goto do_fault_protect;
   1156             }
   1157             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
   1158             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
   1159                 pde |= PG_ACCESSED_MASK;
   1160                 if (is_dirty)
   1161                     pde |= PG_DIRTY_MASK;
   1162                 stl_phys_notdirty(pde_addr, pde);
   1163             }
   1164 
   1165             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
   1166             ptep = pte;
   1167             virt_addr = addr & ~(page_size - 1);
   1168         } else {
   1169             if (!(pde & PG_ACCESSED_MASK)) {
   1170                 pde |= PG_ACCESSED_MASK;
   1171                 stl_phys_notdirty(pde_addr, pde);
   1172             }
   1173 
    1174             /* page table entry */
   1175             pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
   1176                 env->a20_mask;
   1177             pte = ldl_phys(pte_addr);
   1178             if (!(pte & PG_PRESENT_MASK)) {
   1179                 error_code = 0;
   1180                 goto do_fault;
   1181             }
   1182             /* combine pde and pte user and rw protections */
   1183             ptep = pte & pde;
   1184             if (is_user) {
   1185                 if (!(ptep & PG_USER_MASK))
   1186                     goto do_fault_protect;
   1187                 if (is_write && !(ptep & PG_RW_MASK))
   1188                     goto do_fault_protect;
   1189             } else {
   1190                 if ((env->cr[0] & CR0_WP_MASK) &&
   1191                     is_write && !(ptep & PG_RW_MASK))
   1192                     goto do_fault_protect;
   1193             }
   1194             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
   1195             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
   1196                 pte |= PG_ACCESSED_MASK;
   1197                 if (is_dirty)
   1198                     pte |= PG_DIRTY_MASK;
   1199                 stl_phys_notdirty(pte_addr, pte);
   1200             }
   1201             page_size = 4096;
   1202             virt_addr = addr & ~0xfff;
   1203         }
   1204     }
   1205     /* the page can be put in the TLB */
   1206     prot = PAGE_READ;
   1207     if (!(ptep & PG_NX_MASK))
   1208         prot |= PAGE_EXEC;
   1209     if (pte & PG_DIRTY_MASK) {
    1210         /* only grant write access if the page is already dirty... otherwise
    1211            wait for the first write so the dirty bit can be set */
   1212         if (is_user) {
   1213             if (ptep & PG_RW_MASK)
   1214                 prot |= PAGE_WRITE;
   1215         } else {
   1216             if (!(env->cr[0] & CR0_WP_MASK) ||
   1217                 (ptep & PG_RW_MASK))
   1218                 prot |= PAGE_WRITE;
   1219         }
   1220     }
   1221  do_mapping:
   1222     pte = pte & env->a20_mask;
   1223 
    1224     /* Even for 4MB pages, we map only one 4KB page in the TLB to
    1225        avoid filling it too quickly */
   1226     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
   1227     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
   1228     vaddr = virt_addr + page_offset;
   1229 
   1230     tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
   1231     return 0;
   1232  do_fault_protect:
   1233     error_code = PG_ERROR_P_MASK;
   1234  do_fault:
   1235     error_code |= (is_write << PG_ERROR_W_BIT);
   1236     if (is_user)
   1237         error_code |= PG_ERROR_U_MASK;
   1238     if (is_write1 == 2 &&
   1239         (env->efer & MSR_EFER_NXE) &&
   1240         (env->cr[4] & CR4_PAE_MASK))
   1241         error_code |= PG_ERROR_I_D_MASK;
   1242     if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
   1243         /* cr2 is not modified in case of exceptions */
   1244         stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
   1245                  addr);
   1246     } else {
   1247         env->cr[2] = addr;
   1248     }
   1249     env->error_code = error_code;
   1250     env->exception_index = EXCP0E_PAGE;
   1251     return 1;
   1252 }
   1253 
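         /* Debugger helper: walk the page tables for ADDR without setting
          * accessed/dirty bits or raising faults, and return the physical address
          * or -1 if the page is not mapped. */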
   1254 hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
   1255 {
   1256     target_ulong pde_addr, pte_addr;
   1257     uint64_t pte;
   1258     hwaddr paddr;
   1259     uint32_t page_offset;
   1260     int page_size;
   1261 
   1262     if (env->cr[4] & CR4_PAE_MASK) {
   1263         target_ulong pdpe_addr;
   1264         uint64_t pde, pdpe;
   1265 
   1266 #ifdef TARGET_X86_64
   1267         if (env->hflags & HF_LMA_MASK) {
   1268             uint64_t pml4e_addr, pml4e;
   1269             int32_t sext;
   1270 
   1271             /* test virtual address sign extension */
   1272             sext = (int64_t)addr >> 47;
   1273             if (sext != 0 && sext != -1)
   1274                 return -1;
   1275 
   1276             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
   1277                 env->a20_mask;
   1278             pml4e = ldq_phys(pml4e_addr);
   1279             if (!(pml4e & PG_PRESENT_MASK))
   1280                 return -1;
   1281 
   1282             pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
   1283                 env->a20_mask;
   1284             pdpe = ldq_phys(pdpe_addr);
   1285             if (!(pdpe & PG_PRESENT_MASK))
   1286                 return -1;
   1287         } else
   1288 #endif
   1289         {
   1290             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
   1291                 env->a20_mask;
   1292             pdpe = ldq_phys(pdpe_addr);
   1293             if (!(pdpe & PG_PRESENT_MASK))
   1294                 return -1;
   1295         }
   1296 
   1297         pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
   1298             env->a20_mask;
   1299         pde = ldq_phys(pde_addr);
   1300         if (!(pde & PG_PRESENT_MASK)) {
   1301             return -1;
   1302         }
   1303         if (pde & PG_PSE_MASK) {
   1304             /* 2 MB page */
   1305             page_size = 2048 * 1024;
   1306             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
   1307         } else {
   1308             /* 4 KB page */
   1309             pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
   1310                 env->a20_mask;
   1311             page_size = 4096;
   1312             pte = ldq_phys(pte_addr);
   1313         }
   1314         if (!(pte & PG_PRESENT_MASK))
   1315             return -1;
   1316     } else {
   1317         uint32_t pde;
   1318 
   1319         if (!(env->cr[0] & CR0_PG_MASK)) {
   1320             pte = addr;
   1321             page_size = 4096;
   1322         } else {
   1323             /* page directory entry */
   1324             pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
   1325             pde = ldl_phys(pde_addr);
   1326             if (!(pde & PG_PRESENT_MASK))
   1327                 return -1;
   1328             if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
   1329                 pte = pde & ~0x003ff000; /* align to 4MB */
   1330                 page_size = 4096 * 1024;
   1331             } else {
    1332                 /* page table entry */
   1333                 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
   1334                 pte = ldl_phys(pte_addr);
   1335                 if (!(pte & PG_PRESENT_MASK))
   1336                     return -1;
   1337                 page_size = 4096;
   1338             }
   1339         }
   1340         pte = pte & env->a20_mask;
   1341     }
   1342 
   1343     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
   1344     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
   1345     return paddr;
   1346 }
   1347 
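         /* Map one x86 debug-register slot (DR0..DR3, configured through DR7)
          * onto QEMU's generic breakpoint/watchpoint machinery: type 0 is an
          * instruction breakpoint, type 1 a data-write watchpoint, type 3 a
          * data read/write watchpoint; type 2 (I/O) is not supported.  On
          * failure the slot's breakpoint pointer is cleared. */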
   1348 void hw_breakpoint_insert(CPUX86State *env, int index)
   1349 {
   1350     int type, err = 0;
   1351 
   1352     switch (hw_breakpoint_type(env->dr[7], index)) {
   1353     case 0:
   1354         if (hw_breakpoint_enabled(env->dr[7], index))
   1355             err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
   1356                                         &env->cpu_breakpoint[index]);
   1357         break;
   1358     case 1:
   1359         type = BP_CPU | BP_MEM_WRITE;
   1360         goto insert_wp;
   1361     case 2:
    1362         /* No support for I/O watchpoints yet */
   1363         break;
   1364     case 3:
   1365         type = BP_CPU | BP_MEM_ACCESS;
   1366     insert_wp:
   1367         err = cpu_watchpoint_insert(env, env->dr[index],
   1368                                     hw_breakpoint_len(env->dr[7], index),
   1369                                     type, &env->cpu_watchpoint[index]);
   1370         break;
   1371     }
   1372     if (err)
   1373         env->cpu_breakpoint[index] = NULL;
   1374 }
   1375 
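         /* Undo whatever hw_breakpoint_insert() registered for this DR slot. */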
   1376 void hw_breakpoint_remove(CPUX86State *env, int index)
   1377 {
   1378     if (!env->cpu_breakpoint[index])
   1379         return;
   1380     switch (hw_breakpoint_type(env->dr[7], index)) {
   1381     case 0:
   1382         if (hw_breakpoint_enabled(env->dr[7], index))
   1383             cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
   1384         break;
   1385     case 1:
   1386     case 3:
   1387         cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
   1388         break;
   1389     case 2:
   1390         /* No support for I/O watchpoints yet */
   1391         break;
   1392     }
   1393 }
   1394 
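         /* Recompute the DR6 status bits B0..B3 for the four debug-register
          * slots: an execute breakpoint matches when DR[n] equals the current
          * EIP, a data watchpoint when its QEMU watchpoint reports a hit.
          * DR6 is written back if an enabled slot matched or if the caller
          * forces an update; returns nonzero when an enabled slot matched. */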
   1395 int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
   1396 {
   1397     target_ulong dr6;
   1398     int reg, type;
   1399     int hit_enabled = 0;
   1400 
   1401     dr6 = env->dr[6] & ~0xf;
   1402     for (reg = 0; reg < 4; reg++) {
   1403         type = hw_breakpoint_type(env->dr[7], reg);
   1404         if ((type == 0 && env->dr[reg] == env->eip) ||
   1405             ((type & 1) && env->cpu_watchpoint[reg] &&
   1406              (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
   1407             dr6 |= 1 << reg;
   1408             if (hw_breakpoint_enabled(env->dr[7], reg))
   1409                 hit_enabled = 1;
   1410         }
   1411     }
   1412     if (hit_enabled || force_dr6_update)
   1413         env->dr[6] = dr6;
   1414     return hit_enabled;
   1415 }
   1416 
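         /* Debug-exception callback installed by cpu_x86_init().  For a pending
          * watchpoint hit it re-evaluates the debug registers and either raises
          * #DB or silently resumes; otherwise it raises #DB for a hardware
          * (BP_CPU) breakpoint at the current EIP. */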
   1417 static void breakpoint_handler(CPUX86State *env)
   1418 {
   1419     CPUBreakpoint *bp;
   1420 
   1421     if (env->watchpoint_hit) {
   1422         if (env->watchpoint_hit->flags & BP_CPU) {
   1423             env->watchpoint_hit = NULL;
   1424             if (check_hw_breakpoints(env, 0))
   1425                 raise_exception(env, EXCP01_DB);
   1426             else
   1427                 cpu_resume_from_signal(env, NULL);
   1428         }
   1429     } else {
   1430         QTAILQ_FOREACH(bp, &env->breakpoints, entry)
   1431             if (bp->pc == env->eip) {
   1432                 if (bp->flags & BP_CPU) {
   1433                     check_hw_breakpoints(env, 1);
   1434                     raise_exception(env, EXCP01_DB);
   1435                 }
   1436                 break;
   1437             }
   1438     }
   1439 }
   1440 
   1441 
   1442 /* This should come from sysemu.h - if we could include it here... */
   1443 void qemu_system_reset_request(void);
   1444 
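         /* Inject a machine-check event into the given bank.  The request is
          * dropped if the bank index or MCi_STATUS.VAL is invalid, or if
          * MCG_CTL/MCi_CTL gate off uncorrected-error reporting.  Injecting an
          * uncorrected error while another MCE is in progress (or with CR4.MCE
          * clear) triple-faults the guest via a system reset; otherwise the
          * bank's STATUS/ADDR/MISC registers are filled in and, for uncorrected
          * errors, CPU_INTERRUPT_MCE is raised. */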
   1445 void cpu_inject_x86_mce(CPUX86State *cenv, int bank, uint64_t status,
   1446                         uint64_t mcg_status, uint64_t addr, uint64_t misc)
   1447 {
   1448     uint64_t mcg_cap = cenv->mcg_cap;
   1449     unsigned bank_num = mcg_cap & 0xff;
   1450     uint64_t *banks = cenv->mce_banks;
   1451 
   1452     if (bank >= bank_num || !(status & MCI_STATUS_VAL))
   1453         return;
   1454 
   1455     /*
   1456      * if MSR_MCG_CTL is not all 1s, the uncorrected error
   1457      * reporting is disabled
   1458      */
   1459     if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
   1460         cenv->mcg_ctl != ~(uint64_t)0)
   1461         return;
   1462     banks += 4 * bank;
   1463     /*
   1464      * if MSR_MCi_CTL is not all 1s, the uncorrected error
   1465      * reporting is disabled for the bank
   1466      */
   1467     if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
   1468         return;
   1469     if (status & MCI_STATUS_UC) {
   1470         if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
   1471             !(cenv->cr[4] & CR4_MCE_MASK)) {
    1472             fprintf(stderr, "injecting MCE exception while previous "
    1473                     "one is in progress!\n");
   1474             qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
   1475             qemu_system_reset_request();
   1476             return;
   1477         }
   1478         if (banks[1] & MCI_STATUS_VAL)
   1479             status |= MCI_STATUS_OVER;
   1480         banks[2] = addr;
   1481         banks[3] = misc;
   1482         cenv->mcg_status = mcg_status;
   1483         banks[1] = status;
   1484         cpu_interrupt(ENV_GET_CPU(cenv), CPU_INTERRUPT_MCE);
   1485     } else if (!(banks[1] & MCI_STATUS_VAL)
   1486                || !(banks[1] & MCI_STATUS_UC)) {
   1487         if (banks[1] & MCI_STATUS_VAL)
   1488             status |= MCI_STATUS_OVER;
   1489         banks[2] = addr;
   1490         banks[3] = misc;
   1491         banks[1] = status;
   1492     } else
   1493         banks[1] |= MCI_STATUS_OVER;
   1494 }
   1495 #endif /* !CONFIG_USER_ONLY */
   1496 
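         /* Enable the MCE/MCA machinery for family >= 6 CPUs that advertise
          * both CPUID_MCE and CPUID_MCA: set the default MCG_CAP/MCG_CTL values
          * and allocate four MSRs per bank (CTL, STATUS, ADDR, MISC), with each
          * MCi_CTL initialized to all ones. */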
   1497 static void mce_init(CPUX86State *cenv)
   1498 {
   1499     unsigned int bank, bank_num;
   1500 
    1501     if (((cenv->cpuid_version >> 8) & 0xf) >= 6
    1502         && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
   1503         cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
   1504         cenv->mcg_ctl = ~(uint64_t)0;
   1505         bank_num = cenv->mcg_cap & 0xff;
   1506         cenv->mce_banks = g_malloc0(bank_num * sizeof(uint64_t) * 4);
   1507         for (bank = 0; bank < bank_num; bank++)
   1508             cenv->mce_banks[bank*4] = ~(uint64_t)0;
   1509     }
   1510 }
   1511 
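         /* Execute the CPUID instruction on the host and return its results.
          * Only compiled in when CONFIG_KVM is defined, so callers must guard
          * uses with kvm_enabled().  The 32-bit variant writes the results
          * through a memory operand and saves all registers with pusha/popa so
          * that EBX, which PIC builds reserve for the GOT pointer, never has to
          * be named as an operand. */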
   1512 static void host_cpuid(uint32_t function, uint32_t count,
   1513                        uint32_t *eax, uint32_t *ebx,
   1514                        uint32_t *ecx, uint32_t *edx)
   1515 {
   1516 #if defined(CONFIG_KVM)
   1517     uint32_t vec[4];
   1518 
   1519 #ifdef __x86_64__
   1520     asm volatile("cpuid"
   1521                  : "=a"(vec[0]), "=b"(vec[1]),
   1522                    "=c"(vec[2]), "=d"(vec[3])
   1523                  : "0"(function), "c"(count) : "cc");
   1524 #else
   1525     asm volatile("pusha \n\t"
   1526                  "cpuid \n\t"
   1527                  "mov %%eax, 0(%2) \n\t"
   1528                  "mov %%ebx, 4(%2) \n\t"
   1529                  "mov %%ecx, 8(%2) \n\t"
   1530                  "mov %%edx, 12(%2) \n\t"
   1531                  "popa"
   1532                  : : "a"(function), "c"(count), "S"(vec)
   1533                  : "memory", "cc");
   1534 #endif
   1535 
    1536     if (eax)
    1537         *eax = vec[0];
    1538     if (ebx)
    1539         *ebx = vec[1];
    1540     if (ecx)
    1541         *ecx = vec[2];
    1542     if (edx)
    1543         *edx = vec[3];
   1544 #endif
   1545 }
   1546 
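         /* Emulated CPUID: clamp out-of-range leaves, then fill EAX..EDX for
          * the supported basic (0..0xA) and extended (0x80000000..0x8000000A)
          * leaves, blending in host values for the vendor string and feature
          * bits when running under KVM. */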
   1547 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
   1548                    uint32_t *eax, uint32_t *ebx,
   1549                    uint32_t *ecx, uint32_t *edx)
   1550 {
    1551     /* clamp out-of-range leaves to the highest supported basic leaf */
   1552     if (index & 0x80000000) {
   1553         if (index > env->cpuid_xlevel)
   1554             index = env->cpuid_level;
   1555     } else {
   1556         if (index > env->cpuid_level)
   1557             index = env->cpuid_level;
   1558     }
   1559 
   1560     switch(index) {
   1561     case 0:
   1562         *eax = env->cpuid_level;
   1563         *ebx = env->cpuid_vendor1;
   1564         *edx = env->cpuid_vendor2;
   1565         *ecx = env->cpuid_vendor3;
   1566 
    1567         /* sysenter isn't supported in compatibility mode on AMD, and syscall
    1568          * isn't supported in compatibility mode on Intel, so advertise the
    1569          * actual CPU and say goodbye to migration between different vendors
    1570          * if you use compatibility mode. */
   1571         if (kvm_enabled() && !env->cpuid_vendor_override)
   1572             host_cpuid(0, 0, NULL, ebx, ecx, edx);
   1573         break;
   1574     case 1:
   1575         *eax = env->cpuid_version;
   1576         if (kvm_enabled() && !env->cpuid_vendor_override) {
   1577             /* take only subset of ext features which processor can handle */
   1578             uint32_t unused;
   1579             host_cpuid(1, 0, NULL, &unused, ecx, &unused);
   1580         } else {
   1581             *ecx = UINT32_MAX;
   1582         }
   1583         *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
   1584         *ecx &= env->cpuid_ext_features;
   1585         *edx = env->cpuid_features;
   1586 
   1587         /* "Hypervisor present" bit required for Microsoft SVVP */
   1588         if (kvm_enabled())
   1589             *ecx |= (1 << 31);
   1590         break;
   1591     case 2:
   1592         /* cache info: needed for Pentium Pro compatibility */
   1593         *eax = 1;
   1594         *ebx = 0;
   1595         *ecx = 0;
   1596         *edx = 0x2c307d;
   1597         break;
   1598     case 4:
   1599         /* cache info: needed for Core compatibility */
   1600         switch (count) {
   1601             case 0: /* L1 dcache info */
   1602                 *eax = 0x0000121;
   1603                 *ebx = 0x1c0003f;
   1604                 *ecx = 0x000003f;
   1605                 *edx = 0x0000001;
   1606                 break;
   1607             case 1: /* L1 icache info */
   1608                 *eax = 0x0000122;
   1609                 *ebx = 0x1c0003f;
   1610                 *ecx = 0x000003f;
   1611                 *edx = 0x0000001;
   1612                 break;
   1613             case 2: /* L2 cache info */
   1614                 *eax = 0x0000143;
   1615                 *ebx = 0x3c0003f;
   1616                 *ecx = 0x0000fff;
   1617                 *edx = 0x0000001;
   1618                 break;
   1619             default: /* end of info */
   1620                 *eax = 0;
   1621                 *ebx = 0;
   1622                 *ecx = 0;
   1623                 *edx = 0;
   1624                 break;
   1625         }
   1626         break;
   1627     case 5:
   1628         /* mwait info: needed for Core compatibility */
   1629         *eax = 0; /* Smallest monitor-line size in bytes */
   1630         *ebx = 0; /* Largest monitor-line size in bytes */
   1631         *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
   1632         *edx = 0;
   1633         break;
   1634     case 6:
   1635         /* Thermal and Power Leaf */
   1636         *eax = 0;
   1637         *ebx = 0;
   1638         *ecx = 0;
   1639         *edx = 0;
   1640         break;
   1641     case 9:
   1642         /* Direct Cache Access Information Leaf */
   1643         *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
   1644         *ebx = 0;
   1645         *ecx = 0;
   1646         *edx = 0;
   1647         break;
   1648     case 0xA:
   1649         /* Architectural Performance Monitoring Leaf */
   1650         *eax = 0;
   1651         *ebx = 0;
   1652         *ecx = 0;
   1653         *edx = 0;
   1654         break;
   1655     case 0x80000000:
   1656         *eax = env->cpuid_xlevel;
   1657         *ebx = env->cpuid_vendor1;
   1658         *edx = env->cpuid_vendor2;
   1659         *ecx = env->cpuid_vendor3;
   1660         break;
   1661     case 0x80000001:
   1662         *eax = env->cpuid_features;
   1663         *ebx = 0;
   1664         *ecx = env->cpuid_ext3_features;
   1665         *edx = env->cpuid_ext2_features;
   1666 
   1667         if (kvm_enabled()) {
   1668             uint32_t h_eax, h_edx;
   1669 
   1670             host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
   1671 
   1672             /* disable CPU features that the host does not support */
   1673 
   1674             /* long mode */
   1675             if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
   1676                 *edx &= ~0x20000000;
   1677             /* syscall */
   1678             if ((h_edx & 0x00000800) == 0)
   1679                 *edx &= ~0x00000800;
   1680             /* nx */
   1681             if ((h_edx & 0x00100000) == 0)
   1682                 *edx &= ~0x00100000;
   1683 
   1684             /* disable CPU features that KVM cannot support */
   1685 
   1686             /* svm */
   1687             *ecx &= ~4UL;
   1688             /* 3dnow */
   1689             *edx &= ~0xc0000000;
   1690         }
   1691         break;
   1692     case 0x80000002:
   1693     case 0x80000003:
   1694     case 0x80000004:
   1695         *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
   1696         *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
   1697         *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
   1698         *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
   1699         break;
   1700     case 0x80000005:
   1701         /* cache info (L1 cache) */
   1702         *eax = 0x01ff01ff;
   1703         *ebx = 0x01ff01ff;
   1704         *ecx = 0x40020140;
   1705         *edx = 0x40020140;
   1706         break;
   1707     case 0x80000006:
   1708         /* cache info (L2 cache) */
   1709         *eax = 0;
   1710         *ebx = 0x42004200;
   1711         *ecx = 0x02008140;
   1712         *edx = 0;
   1713         break;
   1714     case 0x80000008:
   1715         /* virtual & phys address size in low 2 bytes. */
    1716         /* XXX: This value must match the one used in the MMU code. */
    1717         if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
    1718             /* 64 bit processor */
    1719             /* XXX: The physical address space is limited to 42 bits in exec.c. */
    1720             *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
   1721         } else {
   1722             if (env->cpuid_features & CPUID_PSE36)
   1723                 *eax = 0x00000024; /* 36 bits physical */
   1724             else
   1725                 *eax = 0x00000020; /* 32 bits physical */
   1726         }
   1727         *ebx = 0;
   1728         *ecx = 0;
   1729         *edx = 0;
   1730         break;
   1731     case 0x8000000A:
   1732         *eax = 0x00000001; /* SVM Revision */
   1733         *ebx = 0x00000010; /* nr of ASIDs */
   1734         *ecx = 0;
   1735         *edx = 0; /* optional features */
   1736         break;
   1737     default:
   1738         /* reserved values: zero */
   1739         *eax = 0;
   1740         *ebx = 0;
   1741         *ecx = 0;
   1742         *edx = 0;
   1743         break;
   1744     }
   1745 }
   1746 
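         /* Allocate and initialize an x86 CPU: perform the one-time setup of
          * the translator and the debug-exception handler, register the
          * requested CPU model, set up the MCE banks, reset the CPU, create the
          * vCPU and, under KVM, trim the CPUID feature words down to what the
          * host kernel can actually expose. */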
   1747 CPUX86State *cpu_x86_init(const char *cpu_model)
   1748 {
   1749     X86CPU *x86_cpu;
   1750     CPUX86State *env;
   1751     static int inited;
   1752 
   1753     x86_cpu = g_malloc0(sizeof(X86CPU));
   1754     env = &x86_cpu->env;
   1755     ENV_GET_CPU(env)->env_ptr = env;
   1756     CPUState *cpu = ENV_GET_CPU(env);
   1757 
   1758     cpu_exec_init(env);
   1759     cpu->cpu_model_str = cpu_model;
   1760 
   1761 
   1762     /* init various static tables */
   1763     if (!inited) {
   1764         inited = 1;
   1765         optimize_flags_init();
   1766 #ifndef CONFIG_USER_ONLY
   1767         cpu_set_debug_excp_handler(breakpoint_handler);
   1768 #endif
   1769     }
   1770     if (cpu_x86_register(env, cpu_model) < 0) {
   1771         cpu_x86_close(env);
   1772         return NULL;
   1773     }
   1774     mce_init(env);
   1775     cpu_reset(cpu);
   1776 
   1777     qemu_init_vcpu(cpu);
   1778 
   1779     if (kvm_enabled()) {
   1780         kvm_trim_features(&env->cpuid_features,
   1781                           kvm_arch_get_supported_cpuid(cpu, 1, R_EDX),
   1782                           feature_name);
   1783         kvm_trim_features(&env->cpuid_ext_features,
   1784                           kvm_arch_get_supported_cpuid(cpu, 1, R_ECX),
   1785                           ext_feature_name);
   1786         kvm_trim_features(&env->cpuid_ext2_features,
   1787                           kvm_arch_get_supported_cpuid(cpu, 0x80000001, R_EDX),
   1788                           ext2_feature_name);
   1789         kvm_trim_features(&env->cpuid_ext3_features,
   1790                           kvm_arch_get_supported_cpuid(cpu, 0x80000001, R_ECX),
   1791                           ext3_feature_name);
   1792     }
   1793 
   1794     return env;
   1795 }
   1796 
   1797 #if !defined(CONFIG_USER_ONLY)
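         /* INIT/SIPI handling for system emulation: an INIT fully resets the
          * CPU while preserving a pending SIPI request and re-initializes the
          * local APIC; a SIPI is simply forwarded to the APIC.  Both are
          * no-ops in user-mode emulation (see the #else branch below). */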
   1798 void do_cpu_init(CPUX86State *env)
   1799 {
   1800     CPUState *cpu = ENV_GET_CPU(env);
   1801     int sipi = cpu->interrupt_request & CPU_INTERRUPT_SIPI;
   1802     cpu_reset(cpu);
   1803     cpu->interrupt_request = sipi;
   1804     apic_init_reset(env);
   1805 }
   1806 
   1807 void do_cpu_sipi(CPUX86State *env)
   1808 {
   1809     apic_sipi(env);
   1810 }
   1811 #else
   1812 void do_cpu_init(CPUX86State *env)
   1813 {
   1814 }
   1815 void do_cpu_sipi(CPUX86State *env)
   1816 {
   1817 }
   1818 #endif
   1819