Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.c        | 642
-rw-r--r--  target-i386/cpu.h        |  55
-rw-r--r--  target-i386/fpu_helper.c | 108
-rw-r--r--  target-i386/helper.c     |  19
-rw-r--r--  target-i386/kvm.c        | 104
-rw-r--r--  target-i386/kvm_i386.h   |   2
-rw-r--r--  target-i386/monitor.c    |   3
-rw-r--r--  target-i386/seg_helper.c |  36
-rw-r--r--  target-i386/translate.c  |   8
9 files changed, 560 insertions(+), 417 deletions(-)
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 6a1afab595..1c57fce81b 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -57,6 +57,7 @@
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
+#define CPUID_2_L3_16MB_16WAY_64B 0x4d
/* CPUID Leaf 4 constants: */
@@ -131,11 +132,18 @@
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512
-/* No L3 cache: */
+/* Level 3 unified cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
+#define L3_N_LINE_SIZE 64
+#define L3_N_ASSOCIATIVITY 16
+#define L3_N_SETS 16384
+#define L3_N_PARTITIONS 1
+#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
+#define L3_N_LINES_PER_TAG 1
+#define L3_N_SIZE_KB_AMD 16384
/* TLB definitions: */
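Note: a quick consistency check on the new geometry, as an illustrative C sketch (not part of the patch): 16384 sets x 16 ways x 1 partition x 64-byte lines is exactly 16 MiB, agreeing with both L3_N_SIZE_KB_AMD (16384 KB) and the leaf 2 descriptor 0x4d ("16MB, 16-way, 64-byte lines").

    /* Sketch: the L3 geometry macros are mutually consistent. */
    _Static_assert((uint64_t)L3_N_SETS * L3_N_ASSOCIATIVITY * L3_N_PARTITIONS *
                   L3_N_LINE_SIZE == (uint64_t)L3_N_SIZE_KB_AMD * 1024,
                   "L3 geometry must add up to the advertised 16 MB");
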
@@ -173,181 +181,6 @@ static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
dst[CPUID_VENDOR_SZ] = '\0';
}
-/* feature flags taken from "Intel Processor Identification and the CPUID
- * Instruction" and AMD's "CPUID Specification". In cases of disagreement
- * between feature naming conventions, aliases may be added.
- */
-static const char *feature_name[] = {
- "fpu", "vme", "de", "pse",
- "tsc", "msr", "pae", "mce",
- "cx8", "apic", NULL, "sep",
- "mtrr", "pge", "mca", "cmov",
- "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
- NULL, "ds" /* Intel dts */, "acpi", "mmx",
- "fxsr", "sse", "sse2", "ss",
- "ht" /* Intel htt */, "tm", "ia64", "pbe",
-};
-static const char *ext_feature_name[] = {
- "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
- "ds_cpl", "vmx", "smx", "est",
- "tm2", "ssse3", "cid", NULL,
- "fma", "cx16", "xtpr", "pdcm",
- NULL, "pcid", "dca", "sse4.1|sse4_1",
- "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
- "tsc-deadline", "aes", "xsave", "osxsave",
- "avx", "f16c", "rdrand", "hypervisor",
-};
-/* Feature names that are already defined on feature_name[] but are set on
- * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
- * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
- * if and only if CPU vendor is AMD.
- */
-static const char *ext2_feature_name[] = {
- NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
- NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
- NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
- NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
- NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
- "nx|xd", NULL, "mmxext", NULL /* mmx */,
- NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
- NULL, "lm|i64", "3dnowext", "3dnow",
-};
-static const char *ext3_feature_name[] = {
- "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
- "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
- "3dnowprefetch", "osvw", "ibs", "xop",
- "skinit", "wdt", NULL, "lwp",
- "fma4", "tce", NULL, "nodeid_msr",
- NULL, "tbm", "topoext", "perfctr_core",
- "perfctr_nb", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *ext4_feature_name[] = {
- NULL, NULL, "xstore", "xstore-en",
- NULL, NULL, "xcrypt", "xcrypt-en",
- "ace2", "ace2-en", "phe", "phe-en",
- "pmm", "pmm-en", NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *kvm_feature_name[] = {
- "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
- "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "kvmclock-stable-bit", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *hyperv_priv_feature_name[] = {
- NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
- NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
- NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
- NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
- NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
- NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *hyperv_ident_feature_name[] = {
- NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
- NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
- NULL /* hv_post_messages */, NULL /* hv_signal_events */,
- NULL /* hv_create_port */, NULL /* hv_connect_port */,
- NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
- NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
- NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *hyperv_misc_feature_name[] = {
- NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
- NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
- NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
- NULL, NULL,
- NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *svm_feature_name[] = {
- "npt", "lbrv", "svm_lock", "nrip_save",
- "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
- NULL, NULL, "pause_filter", NULL,
- "pfthreshold", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_7_0_ebx_feature_name[] = {
- "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
- "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
- "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
- "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
-};
-
-static const char *cpuid_7_0_ecx_feature_name[] = {
- NULL, NULL, "umip", "pku",
- "ospke", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, "rdpid", NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_apm_edx_feature_name[] = {
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- "invtsc", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_xsave_feature_name[] = {
- "xsaveopt", "xsavec", "xgetbv1", "xsaves",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
-static const char *cpuid_6_feature_name[] = {
- NULL, NULL, "arat", NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
-};
-
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
@@ -413,96 +246,266 @@ static const char *cpuid_6_feature_name[] = {
CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
- const char **feat_names;
+ /* feature flags names are taken from "Intel Processor Identification and
+ * the CPUID Instruction" and AMD's "CPUID Specification".
+ * In cases of disagreement between feature naming conventions,
+ * aliases may be added.
+ */
+ const char *feat_names[32];
uint32_t cpuid_eax; /* Input EAX for CPUID */
bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t cpuid_ecx; /* Input ECX value for CPUID */
int cpuid_reg; /* output register (R_* constant) */
uint32_t tcg_features; /* Feature flags supported by TCG */
uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
+ uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
- .feat_names = feature_name,
+ .feat_names = {
+ "fpu", "vme", "de", "pse",
+ "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep",
+ "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
+ NULL, "ds" /* Intel dts */, "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss",
+ "ht" /* Intel htt */, "tm", "ia64", "pbe",
+ },
.cpuid_eax = 1, .cpuid_reg = R_EDX,
.tcg_features = TCG_FEATURES,
},
[FEAT_1_ECX] = {
- .feat_names = ext_feature_name,
+ .feat_names = {
+ "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
+ "ds_cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL,
+ "fma", "cx16", "xtpr", "pdcm",
+ NULL, "pcid", "dca", "sse4.1|sse4_1",
+ "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
+ "tsc-deadline", "aes", "xsave", "osxsave",
+ "avx", "f16c", "rdrand", "hypervisor",
+ },
.cpuid_eax = 1, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT_FEATURES,
},
+ /* Feature names that are already defined on feature_name[] but
+ * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
+ * names on feat_names below. They are copied automatically
+ * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
+ */
[FEAT_8000_0001_EDX] = {
- .feat_names = ext2_feature_name,
+ .feat_names = {
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx|xd", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
+ NULL, "lm|i64", "3dnowext", "3dnow",
+ },
.cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT2_FEATURES,
},
[FEAT_8000_0001_ECX] = {
- .feat_names = ext3_feature_name,
+ .feat_names = {
+ "lahf_lm", "cmp_legacy", "svm", "extapic",
+ "cr8legacy", "abm", "sse4a", "misalignsse",
+ "3dnowprefetch", "osvw", "ibs", "xop",
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid_msr",
+ NULL, "tbm", "topoext", "perfctr_core",
+ "perfctr_nb", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
.tcg_features = TCG_EXT3_FEATURES,
},
[FEAT_C000_0001_EDX] = {
- .feat_names = ext4_feature_name,
+ .feat_names = {
+ NULL, NULL, "xstore", "xstore-en",
+ NULL, NULL, "xcrypt", "xcrypt-en",
+ "ace2", "ace2-en", "phe", "phe-en",
+ "pmm", "pmm-en", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
.tcg_features = TCG_EXT4_FEATURES,
},
[FEAT_KVM] = {
- .feat_names = kvm_feature_name,
+ .feat_names = {
+ "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
+ "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "kvmclock-stable-bit", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
.tcg_features = TCG_KVM_FEATURES,
},
[FEAT_HYPERV_EAX] = {
- .feat_names = hyperv_priv_feature_name,
+ .feat_names = {
+ NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
+ NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
+ NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
+ NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
+ NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
+ NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
},
[FEAT_HYPERV_EBX] = {
- .feat_names = hyperv_ident_feature_name,
+ .feat_names = {
+ NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
+ NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
+ NULL /* hv_post_messages */, NULL /* hv_signal_events */,
+ NULL /* hv_create_port */, NULL /* hv_connect_port */,
+ NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
+ NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
+ NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
},
[FEAT_HYPERV_EDX] = {
- .feat_names = hyperv_misc_feature_name,
+ .feat_names = {
+ NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
+ NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
+ NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
+ NULL, NULL,
+ NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
},
[FEAT_SVM] = {
- .feat_names = svm_feature_name,
+ .feat_names = {
+ "npt", "lbrv", "svm_lock", "nrip_save",
+ "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
+ NULL, NULL, "pause_filter", NULL,
+ "pfthreshold", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
.tcg_features = TCG_SVM_FEATURES,
},
[FEAT_7_0_EBX] = {
- .feat_names = cpuid_7_0_ebx_feature_name,
+ .feat_names = {
+ "fsgsbase", "tsc_adjust", NULL, "bmi1",
+ "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm",
+ NULL, NULL, "mpx", NULL,
+ "avx512f", "avx512dq", "rdseed", "adx",
+ "smap", "avx512ifma", "pcommit", "clflushopt",
+ "clwb", NULL, "avx512pf", "avx512er",
+ "avx512cd", NULL, "avx512bw", "avx512vl",
+ },
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_EBX,
.tcg_features = TCG_7_0_EBX_FEATURES,
},
[FEAT_7_0_ECX] = {
- .feat_names = cpuid_7_0_ecx_feature_name,
+ .feat_names = {
+ NULL, "avx512vbmi", "umip", "pku",
+ "ospke", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "rdpid", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 7,
.cpuid_needs_ecx = true, .cpuid_ecx = 0,
.cpuid_reg = R_ECX,
.tcg_features = TCG_7_0_ECX_FEATURES,
},
[FEAT_8000_0007_EDX] = {
- .feat_names = cpuid_apm_edx_feature_name,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "invtsc", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0x80000007,
.cpuid_reg = R_EDX,
.tcg_features = TCG_APM_FEATURES,
.unmigratable_flags = CPUID_APM_INVTSC,
},
[FEAT_XSAVE] = {
- .feat_names = cpuid_xsave_feature_name,
+ .feat_names = {
+ "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
.tcg_features = TCG_XSAVE_FEATURES,
},
[FEAT_6_EAX] = {
- .feat_names = cpuid_6_feature_name,
+ .feat_names = {
+ NULL, NULL, "arat", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
.cpuid_eax = 6, .cpuid_reg = R_EAX,
.tcg_features = TCG_6_EAX_FEATURES,
},
+ [FEAT_XSAVE_COMP_LO] = {
+ .cpuid_eax = 0xD,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EAX,
+ .tcg_features = ~0U,
+ .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+ XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
+ XSTATE_PKRU_MASK,
+ },
+ [FEAT_XSAVE_COMP_HI] = {
+ .cpuid_eax = 0xD,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EDX,
+ .tcg_features = ~0U,
+ },
};
typedef struct X86RegisterInfo32 {
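Note: with feat_names embedded in each FeatureWordInfo entry, name-to-bit resolution is a plain scan of this table. An illustrative sketch (hypothetical helper; the real lookup also splits "a|b" aliases before comparing):

    static bool find_feature(const char *name, FeatureWord *w_out, int *bit_out)
    {
        FeatureWord w;
        int i;

        for (w = 0; w < FEATURE_WORDS; w++) {
            for (i = 0; i < 32; i++) {
                const char *n = feature_word_info[w].feat_names[i];
                if (n && !strcmp(n, name)) {
                    *w_out = w;
                    *bit_out = i;
                    return true;
                }
            }
        }
        return false;
    }
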
@@ -526,7 +529,12 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-const ExtSaveArea x86_ext_save_areas[] = {
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+static const ExtSaveArea x86_ext_save_areas[] = {
[XSTATE_YMM_BIT] =
{ .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
.offset = offsetof(X86XSaveArea, avx_state),
@@ -557,6 +565,26 @@ const ExtSaveArea x86_ext_save_areas[] = {
.size = sizeof(XSavePKRU) },
};
+static uint32_t xsave_area_size(uint64_t mask)
+{
+ int i;
+ uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
+
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((mask >> i) & 1) {
+ ret = MAX(ret, esa->offset + esa->size);
+ }
+ }
+ return ret;
+}
+
+static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
+{
+ return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
+ cpu->env.features[FEAT_XSAVE_COMP_LO];
+}
+
const char *get_register_name_32(unsigned int reg)
{
if (reg >= CPU_NB_REGS32) {
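Note: xsave_area_size(), added in the hunk above, computes the standard-format buffer size for a component mask: the 512-byte legacy area plus the 64-byte header, extended to cover the highest enabled component. An illustrative example (sizes follow the standard layout used by x86_ext_save_areas):

    uint32_t sz;

    sz = xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK);
    /* sz == 576: sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader) */

    sz = xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK);
    /* sz == 832: offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX) */
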
@@ -577,15 +605,13 @@ static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
for (i = 0; i < 32; i++) {
uint32_t f = 1U << i;
- /* If the feature name is unknown, it is not supported by QEMU yet */
- if (!wi->feat_names[i]) {
- continue;
- }
- /* Skip features known to QEMU, but explicitly marked as unmigratable */
- if (wi->unmigratable_flags & f) {
- continue;
+
+ /* If the feature name is known, it is implicitly considered migratable,
+ * unless it is explicitly set in unmigratable_flags */
+ if ((wi->migratable_flags & f) ||
+ (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
+ r |= f;
}
- r |= f;
}
return r;
}
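Note: the rewritten loop turns the old skip-list into a positive predicate. The per-bit rule it implements, as an equivalent sketch:

    /* Sketch: a bit is migratable if it is explicitly whitelisted
     * (migratable_flags, used by the unnamed XSAVE component words), or
     * if it has a name and is not blacklisted (e.g. "invtsc" stays
     * unmigratable via unmigratable_flags). */
    static inline bool bit_is_migratable(const FeatureWordInfo *wi, int i)
    {
        uint32_t f = 1U << i;

        return (wi->migratable_flags & f) ||
               (wi->feat_names[i] && !(wi->unmigratable_flags & f));
    }
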
@@ -694,8 +720,7 @@ static void add_flagname_to_bitmaps(const char *flagname,
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
- if (wi->feat_names &&
- lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
+ if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
break;
}
}
@@ -744,7 +769,6 @@ struct X86CPUDefinition {
const char *name;
uint32_t level;
uint32_t xlevel;
- uint32_t xlevel2;
/* vendor is zero-terminated, 12 character ASCII string */
char vendor[CPUID_VENDOR_SZ + 1];
int family;
@@ -1404,9 +1428,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
.name = "Opteron_G3",
.level = 5,
.vendor = CPUID_VENDOR_AMD,
- .family = 15,
- .model = 6,
- .stepping = 1,
+ .family = 16,
+ .model = 2,
+ .stepping = 3,
.features[FEAT_1_EDX] =
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
@@ -1627,9 +1651,12 @@ static void host_x86_cpu_initfn(Object *obj)
/* If KVM is disabled, x86_cpu_realizefn() will report an error later */
if (kvm_enabled()) {
- env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
- env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
- env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+ env->cpuid_min_level =
+ kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
if (lmce_supported()) {
object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
@@ -2192,12 +2219,13 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
char host_vendor[CPUID_VENDOR_SZ + 1];
FeatureWord w;
- object_property_set_int(OBJECT(cpu), def->level, "level", errp);
+ /* CPU models only set _minimum_ values for level/xlevel: */
+ object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
+ object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
+
object_property_set_int(OBJECT(cpu), def->family, "family", errp);
object_property_set_int(OBJECT(cpu), def->model, "model", errp);
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
- object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
- object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] = def->features[w];
@@ -2275,6 +2303,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
+ uint32_t pkg_offset;
/* test if maximum index reached */
if (index & 0x80000000) {
@@ -2328,7 +2357,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
*eax = 1; /* Number of CPUID[EAX=2] calls required */
*ebx = 0;
- *ecx = 0;
+ if (!cpu->enable_l3_cache) {
+ *ecx = 0;
+ } else {
+ *ecx = L3_N_DESCRIPTOR;
+ }
*edx = (L1D_DESCRIPTOR << 16) | \
(L1I_DESCRIPTOR << 8) | \
(L2_DESCRIPTOR);
@@ -2374,6 +2407,25 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = L2_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
+ case 3: /* L3 cache info */
+ if (!cpu->enable_l3_cache) {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+ *eax |= CPUID_4_TYPE_UNIFIED | \
+ CPUID_4_LEVEL(3) | \
+ CPUID_4_SELF_INIT_LEVEL;
+ pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *eax |= ((1 << pkg_offset) - 1) << 14;
+ *ebx = (L3_N_LINE_SIZE - 1) | \
+ ((L3_N_PARTITIONS - 1) << 12) | \
+ ((L3_N_ASSOCIATIVITY - 1) << 22);
+ *ecx = L3_N_SETS - 1;
+ *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
+ break;
default: /* end of info */
*eax = 0;
*ebx = 0;
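Note: guests decode leaf 4 with +1 biases on each field; a worked check of the new L3 case (illustrative, assumes a CPU with l3-cache left enabled):

    uint32_t eax, ebx, ecx, edx;
    uint64_t bytes;

    cpu_x86_cpuid(env, 4, 3, &eax, &ebx, &ecx, &edx);
    bytes = (uint64_t)((ebx >> 22) + 1)      /* ways:       16    */
          * (((ebx >> 12) & 0x3ff) + 1)      /* partitions:  1    */
          * ((ebx & 0xfff) + 1)              /* line size:  64    */
          * (ecx + 1);                       /* sets:       16384 */
    assert(bytes == 16ULL * 1024 * 1024);    /* 16 MiB */
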
@@ -2454,13 +2506,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0:
- *eax = apicid_core_offset(smp_cores, smp_threads);
- *ebx = smp_threads;
+ *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
break;
case 1:
- *eax = apicid_pkg_offset(smp_cores, smp_threads);
- *ebx = smp_cores * smp_threads;
+ *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_cores * cs->nr_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
break;
default:
@@ -2473,10 +2525,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
case 0xD: {
- KVMState *s = cs->kvm_state;
- uint64_t ena_mask;
- int i;
-
/* Processor Extended State */
*eax = 0;
*ebx = 0;
@@ -2485,36 +2533,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
- if (kvm_enabled()) {
- ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
- ena_mask <<= 32;
- ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
- } else {
- ena_mask = -1;
- }
if (count == 0) {
- *ecx = 0x240;
- for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
- const ExtSaveArea *esa = &x86_ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits
- && ((ena_mask >> i) & 1) != 0) {
- if (i < 32) {
- *eax |= 1u << i;
- } else {
- *edx |= 1u << (i - 32);
- }
- *ecx = MAX(*ecx, esa->offset + esa->size);
- }
- }
- *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
+ *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
+ *eax = env->features[FEAT_XSAVE_COMP_LO];
+ *edx = env->features[FEAT_XSAVE_COMP_HI];
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
- const ExtSaveArea *esa = &x86_ext_save_areas[count];
- if ((env->features[esa->feature] & esa->bits) == esa->bits
- && ((ena_mask >> count) & 1) != 0) {
+ if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
*eax = esa->size;
*ebx = esa->offset;
}
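Note: leaf 0xD is now served straight from the precomputed FEAT_XSAVE_COMP_{LO,HI} words instead of re-querying KVM at CPUID time. Illustrative expectations, assuming a model with XSAVE and AVX but no wider components:

    uint32_t eax, ebx, ecx, edx;

    cpu_x86_cpuid(env, 0xd, 0, &eax, &ebx, &ecx, &edx);
    /* eax == XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK (0x7) */
    /* edx == 0 (no components above bit 31)                           */
    /* ecx == xsave_area_size(0x7) == 832, and ebx == ecx              */
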
@@ -2585,9 +2614,15 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = (L2_SIZE_KB_AMD << 16) | \
(AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
(L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
- *edx = ((L3_SIZE_KB/512) << 18) | \
- (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
- (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+ if (!cpu->enable_l3_cache) {
+ *edx = ((L3_SIZE_KB / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
+ (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+ } else {
+ *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
+ (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
+ }
break;
case 0x80000007:
*eax = 0;
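Note: CPUID[0x8000_0006].EDX encodes the L3 size in 512 KB units in bits 31:18 and the associativity as an AMD code in bits 15:12; a worked check of the new branch (illustrative):

    uint32_t edx = ((L3_N_SIZE_KB_AMD / 512) << 18) |
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) |
                   (L3_N_LINES_PER_TAG << 8) | L3_N_LINE_SIZE;

    assert(((edx >> 18) & 0x3fff) * 512 == 16384); /* 16 MB, in KB     */
    assert(((edx >> 12) & 0xf) == 0x8);            /* AMD code: 16-way */
    assert((edx & 0xff) == 64);                    /* 64-byte lines    */
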
@@ -2669,7 +2704,7 @@ static void x86_cpu_reset(CPUState *s)
xcc->parent_reset(s);
- memset(env, 0, offsetof(CPUX86State, cpuid_level));
+ memset(env, 0, offsetof(CPUX86State, end_reset_fields));
tlb_flush(s, 1);
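Note: reset now clears everything before a zero-size marker member instead of naming the first preserved field, so reordering the preserved fields can no longer silently change reset behaviour. The idiom, sketched with illustrative names (struct {} is the GCC/Clang zero-size extension CPUX86State uses):

    struct Dev {
        int runtime_state;            /* cleared by reset */
        struct {} end_reset_fields;   /* zero-size marker */
        int configuration;            /* preserved across reset */
    };

    /* memset(d, 0, offsetof(struct Dev, end_reset_fields)); */
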
@@ -2743,7 +2778,7 @@ static void x86_cpu_reset(CPUState *s)
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits) {
+ if (env->features[esa->feature] & esa->bits) {
xcr0 |= 1ull << i;
}
}
@@ -2906,6 +2941,61 @@ static uint32_t x86_host_phys_bits(void)
return host_phys_bits;
}
+static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
+{
+ if (*min < value) {
+ *min = value;
+ }
+}
+
+/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
+static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *fi = &feature_word_info[w];
+ uint32_t eax = fi->cpuid_eax;
+ uint32_t region = eax & 0xF0000000;
+
+ if (!env->features[w]) {
+ return;
+ }
+
+ switch (region) {
+ case 0x00000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
+ break;
+ case 0x80000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
+ break;
+ case 0xC0000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
+ break;
+ }
+}
+
+/* Calculate XSAVE components based on the configured CPU feature flags */
+static void x86_cpu_enable_xsave_components(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int i;
+ uint64_t mask;
+
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ return;
+ }
+
+ mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ mask |= (1ULL << i);
+ }
+ }
+
+ env->features[FEAT_XSAVE_COMP_LO] = mask;
+ env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
+}
+
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
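Note: x86_cpu_adjust_feat_level() raises the matching minimum-level register only when the word has at least one bit set, keying off bits 31:28 of the leaf number to pick level vs. xlevel vs. xlevel2. Illustrative effect (both flag macros already exist in cpu.h):

    env->features[FEAT_6_EAX] |= CPUID_6_EAX_ARAT;
    x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
    assert(env->cpuid_min_level >= 6);

    env->features[FEAT_8000_0001_ECX] |= CPUID_EXT3_SVM;
    x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
    assert(env->cpuid_min_xlevel >= 0x80000001);
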
@@ -2951,8 +3041,40 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->env.features[w] &= ~minus_features[w];
}
- if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
- env->cpuid_level = 7;
+ if (!kvm_enabled() || !cpu->expose_kvm) {
+ env->features[FEAT_KVM] = 0;
+ }
+
+ x86_cpu_enable_xsave_components(cpu);
+
+ /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+ if (cpu->full_cpuid_auto_level) {
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+ x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+ /* SVM requires CPUID[0x8000000A] */
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+ }
+ }
+
+ /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
+ if (env->cpuid_level == UINT32_MAX) {
+ env->cpuid_level = env->cpuid_min_level;
+ }
+ if (env->cpuid_xlevel == UINT32_MAX) {
+ env->cpuid_xlevel = env->cpuid_min_xlevel;
+ }
+ if (env->cpuid_xlevel2 == UINT32_MAX) {
+ env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
}
if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
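Note: the UINT32_MAX defaults (set in the property table later in this patch) mean "not set by the user", so the computed minimum wins unless an explicit value was given. Sketch of the resulting precedence (command lines illustrative):

    /* -cpu Haswell              -> cpuid_level = cpuid_min_level (auto)  */
    /* -cpu Haswell,level=7      -> cpuid_level = 7 (explicit value wins) */
    /* -cpu Haswell,l3-cache=off additionally drops the L3 CPUID data.    */
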
@@ -3215,9 +3337,6 @@ static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
char **names;
FeatureWordInfo *fi = &feature_word_info[w];
- if (!fi->feat_names) {
- return;
- }
if (!fi->feat_names[bitnr]) {
return;
}
@@ -3358,12 +3477,17 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
- DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
- DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
- DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
+ DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
+ DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
+ DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
+ DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
+ DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
DEFINE_PROP_END_OF_LIST()
};
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 65615c0f3b..e64569854f 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -453,6 +453,8 @@ typedef enum FeatureWord {
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEAT_6_EAX, /* CPUID[6].EAX */
+ FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEATURE_WORDS,
} FeatureWord;
@@ -606,16 +608,21 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_RTM (1U << 11)
#define CPUID_7_0_EBX_MPX (1U << 14)
#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
#define CPUID_7_0_EBX_ADX (1U << 19)
#define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
+#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
#define CPUID_7_0_ECX_UMIP (1U << 2)
#define CPUID_7_0_ECX_PKU (1U << 3)
#define CPUID_7_0_ECX_OSPKE (1U << 4)
@@ -691,6 +698,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
+/* Instead of computing the condition codes after each x86 instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, the condition codes can be calculated
+ * using this information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
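Note: the new comment documents QEMU's lazy condition-code scheme; a sketch of how flags are later materialized for a subtraction (simplified from the cc_helper logic, illustrative):

    /* After "sub a, b": CC_DST = a - b, CC_SRC = b, CC_OP = CC_OP_SUBL. */
    uint32_t zf = (CC_DST == 0);
    uint32_t sf = (CC_DST >> 31) & 1;
    uint32_t cf = (uint32_t)(CC_DST + CC_SRC) < (uint32_t)CC_SRC; /* a < b */
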
@@ -872,7 +886,8 @@ typedef union X86LegacyXSaveArea {
typedef struct X86XSaveHeader {
uint64_t xstate_bv;
uint64_t xcomp_bv;
- uint8_t reserved[48];
+ uint64_t reserve0;
+ uint8_t reserved[40];
} X86XSaveHeader;
/* Ext. save area 2: AVX State */
@@ -1030,6 +1045,9 @@ typedef struct CPUX86State {
uint64_t tsc;
uint64_t tsc_adjust;
uint64_t tsc_deadline;
+ uint64_t tsc_aux;
+
+ uint64_t xcr0;
uint64_t mcg_status;
uint64_t msr_ia32_misc_enable;
@@ -1046,6 +1064,8 @@ typedef struct CPUX86State {
uint64_t pat;
uint32_t smbase;
+ uint32_t pkru;
+
/* End of state preserved by INIT (dummy marker). */
struct {} end_init_save;
@@ -1097,11 +1117,15 @@ typedef struct CPUX86State {
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
+ struct {} end_reset_fields;
/* processor features (e.g. for CPUID insn) */
- uint32_t cpuid_level;
- uint32_t cpuid_xlevel;
- uint32_t cpuid_xlevel2;
+ /* Minimum level/xlevel/xlevel2, based on CPU model + features */
+ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
+ /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
+ uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
+ /* Actual level/xlevel/xlevel2 value: */
+ uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
uint32_t cpuid_vendor1;
uint32_t cpuid_vendor2;
uint32_t cpuid_vendor3;
@@ -1130,20 +1154,15 @@ typedef struct CPUX86State {
uint64_t mcg_ctl;
uint64_t mcg_ext_ctl;
uint64_t mce_banks[MCE_BANKS_DEF*4];
-
- uint64_t tsc_aux;
+ uint64_t xstate_bv;
/* vmstate */
uint16_t fpus_vmstate;
uint16_t fptag_vmstate;
uint16_t fpregs_format_vmstate;
- uint64_t xstate_bv;
- uint64_t xcr0;
uint64_t xss;
- uint32_t pkru;
-
TPRAccess tpr_access_type;
} CPUX86State;
@@ -1202,9 +1221,18 @@ struct X86CPU {
*/
bool enable_lmce;
+ /* Compatibility bits for old machine types.
+ * If true, present a virtual L3 cache to the VM; the vCPUs in the same
+ * virtual socket share one virtual L3 cache.
+ */
+ bool enable_l3_cache;
+
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Enable auto level-increase for all CPUID leaves */
+ bool full_cpuid_auto_level;
+
/* if true fill the top bits of the MTRR_PHYSMASKn variable range */
bool fill_mtrr_mask;
@@ -1381,13 +1409,6 @@ int cpu_x86_signal_handler(int host_signum, void *pinfo,
void *puc);
/* cpu.c */
-typedef struct ExtSaveArea {
- uint32_t feature, bits;
- uint32_t offset, size;
-} ExtSaveArea;
-
-extern const ExtSaveArea x86_ext_save_areas[];
-
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
index 929489bfc8..2049a8c01d 100644
--- a/target-i386/fpu_helper.c
+++ b/target-i386/fpu_helper.c
@@ -1110,6 +1110,8 @@ void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
}
#endif
+#define XO(X) offsetof(X86XSaveArea, X)
+
static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
int fpus, fptag, i;
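Note: XO() names the offsets that were previously hard-coded; on the standard layout the two agree. Illustrative build-time checks:

    QEMU_BUILD_BUG_ON(XO(legacy.fcw) != 0x00);
    QEMU_BUILD_BUG_ON(XO(legacy.mxcsr) != 0x18);
    QEMU_BUILD_BUG_ON(XO(legacy.fpregs) != 0x20);
    QEMU_BUILD_BUG_ON(XO(legacy.xmm_regs) != 0xa0);
    QEMU_BUILD_BUG_ON(XO(header.xstate_bv) != 512);
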
@@ -1120,17 +1122,18 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
for (i = 0; i < 8; i++) {
fptag |= (env->fptags[i] << i);
}
- cpu_stw_data_ra(env, ptr, env->fpuc, ra);
- cpu_stw_data_ra(env, ptr + 2, fpus, ra);
- cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, ra);
+
+ cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra);
/* In 32-bit mode this is eip, sel, dp, sel.
In 64-bit mode this is rip, rdp.
But in either case we don't write actual data, just zeros. */
- cpu_stq_data_ra(env, ptr + 0x08, 0, ra); /* eip+sel; rip */
- cpu_stq_data_ra(env, ptr + 0x10, 0, ra); /* edp+sel; rdp */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpip), 0, ra); /* eip+sel; rip */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */
- addr = ptr + 0x20;
+ addr = ptr + XO(legacy.fpregs);
for (i = 0; i < 8; i++) {
floatx80 tmp = ST(i);
helper_fstt(env, tmp, addr, ra);
@@ -1140,8 +1143,8 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, ra); /* mxcsr */
- cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, ra); /* mxcsr_mask */
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
}
static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
@@ -1155,7 +1158,7 @@ static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
nb_xmm_regs = 8;
}
- addr = ptr + 0xa0;
+ addr = ptr + XO(legacy.xmm_regs);
for (i = 0; i < nb_xmm_regs; i++) {
cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
@@ -1163,8 +1166,9 @@ static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
}
}
-static void do_xsave_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
int i;
for (i = 0; i < 4; i++, addr += 16) {
@@ -1173,15 +1177,17 @@ static void do_xsave_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
}
}
-static void do_xsave_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- cpu_stq_data_ra(env, addr, env->bndcs_regs.cfgu, ra);
- cpu_stq_data_ra(env, addr + 8, env->bndcs_regs.sts, ra);
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu),
+ env->bndcs_regs.cfgu, ra);
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts),
+ env->bndcs_regs.sts, ra);
}
-static void do_xsave_pkru(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- cpu_stq_data_ra(env, addr, env->pkru, ra);
+ cpu_stq_data_ra(env, ptr, env->pkru, ra);
}
void helper_fxsave(CPUX86State *env, target_ulong ptr)
@@ -1250,22 +1256,19 @@ static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
do_xsave_sse(env, ptr, ra);
}
if (opt & XSTATE_BNDREGS_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_BNDREGS_BIT].offset;
- do_xsave_bndregs(env, ptr + off, ra);
+ do_xsave_bndregs(env, ptr + XO(bndreg_state), ra);
}
if (opt & XSTATE_BNDCSR_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_BNDCSR_BIT].offset;
- do_xsave_bndcsr(env, ptr + off, ra);
+ do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra);
}
if (opt & XSTATE_PKRU_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_PKRU_BIT].offset;
- do_xsave_pkru(env, ptr + off, ra);
+ do_xsave_pkru(env, ptr + XO(pkru_state), ra);
}
/* Update the XSTATE_BV field. */
- old_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+ old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
- cpu_stq_data_ra(env, ptr + 512, new_bv, ra);
+ cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra);
}
void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
@@ -1281,12 +1284,13 @@ void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- int i, fpus, fptag;
+ int i, fpuc, fpus, fptag;
target_ulong addr;
- cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, ra));
- fpus = cpu_lduw_data_ra(env, ptr + 2, ra);
- fptag = cpu_lduw_data_ra(env, ptr + 4, ra);
+ fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra);
+ fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra);
+ fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra);
+ cpu_set_fpuc(env, fpuc);
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
fptag ^= 0xff;
@@ -1294,7 +1298,7 @@ static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
env->fptags[i] = ((fptag >> i) & 1);
}
- addr = ptr + 0x20;
+ addr = ptr + XO(legacy.fpregs);
for (i = 0; i < 8; i++) {
floatx80 tmp = helper_fldt(env, addr, ra);
ST(i) = tmp;
@@ -1304,7 +1308,7 @@ static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, ra));
+ cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra));
}
static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
@@ -1318,7 +1322,7 @@ static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
nb_xmm_regs = 8;
}
- addr = ptr + 0xa0;
+ addr = ptr + XO(legacy.xmm_regs);
for (i = 0; i < nb_xmm_regs; i++) {
env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
@@ -1326,8 +1330,9 @@ static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
}
}
-static void do_xrstor_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
int i;
for (i = 0; i < 4; i++, addr += 16) {
@@ -1336,16 +1341,18 @@ static void do_xrstor_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
}
}
-static void do_xrstor_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
/* FIXME: Extend highest implemented bit of linear address. */
- env->bndcs_regs.cfgu = cpu_ldq_data_ra(env, addr, ra);
- env->bndcs_regs.sts = cpu_ldq_data_ra(env, addr + 8, ra);
+ env->bndcs_regs.cfgu
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra);
+ env->bndcs_regs.sts
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra);
}
-static void do_xrstor_pkru(CPUX86State *env, target_ulong addr, uintptr_t ra)
+static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- env->pkru = cpu_ldq_data_ra(env, addr, ra);
+ env->pkru = cpu_ldq_data_ra(env, ptr, ra);
}
void helper_fxrstor(CPUX86State *env, target_ulong ptr)
@@ -1373,7 +1380,7 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr)
void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
{
uintptr_t ra = GETPC();
- uint64_t xstate_bv, xcomp_bv0, xcomp_bv1;
+ uint64_t xstate_bv, xcomp_bv, reserve0;
rfbm &= env->xcr0;
@@ -1387,7 +1394,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
raise_exception_ra(env, EXCP0D_GPF, ra);
}
- xstate_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+ xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
if ((int64_t)xstate_bv < 0) {
/* FIXME: Compact form. */
@@ -1396,15 +1403,19 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
/* Standard form. */
- /* The XSTATE field must not set bits not present in XCR0. */
+ /* The XSTATE_BV field must not set bits not present in XCR0. */
if (xstate_bv & ~env->xcr0) {
raise_exception_ra(env, EXCP0D_GPF, ra);
}
- /* The XCOMP field must be zero. */
- xcomp_bv0 = cpu_ldq_data_ra(env, ptr + 520, ra);
- xcomp_bv1 = cpu_ldq_data_ra(env, ptr + 528, ra);
- if (xcomp_bv0 || xcomp_bv1) {
+ /* The XCOMP_BV field must be zero. Note that, as of the April 2016
+ revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2)
+ describes only XCOMP_BV, but the description of the standard form
+ of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which
+ includes the next 64-bit field. */
+ xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra);
+ reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra);
+ if (xcomp_bv || reserve0) {
raise_exception_ra(env, EXCP0D_GPF, ra);
}
@@ -1430,8 +1441,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
}
if (rfbm & XSTATE_BNDREGS_MASK) {
if (xstate_bv & XSTATE_BNDREGS_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_BNDREGS_BIT].offset;
- do_xrstor_bndregs(env, ptr + off, ra);
+ do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra);
env->hflags |= HF_MPX_IU_MASK;
} else {
memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
@@ -1440,8 +1450,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
}
if (rfbm & XSTATE_BNDCSR_MASK) {
if (xstate_bv & XSTATE_BNDCSR_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_BNDCSR_BIT].offset;
- do_xrstor_bndcsr(env, ptr + off, ra);
+ do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra);
} else {
memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
}
@@ -1450,8 +1459,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
if (rfbm & XSTATE_PKRU_MASK) {
uint64_t old_pkru = env->pkru;
if (xstate_bv & XSTATE_PKRU_MASK) {
- target_ulong off = x86_ext_save_areas[XSTATE_PKRU_BIT].offset;
- do_xrstor_pkru(env, ptr + off, ra);
+ do_xrstor_pkru(env, ptr + XO(pkru_state), ra);
} else {
env->pkru = 0;
}
@@ -1462,6 +1470,8 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
}
}
+#undef XO
+
uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
{
/* The OS must have enabled XSAVE. */
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 1c250b8245..9bc961bff3 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1113,7 +1113,6 @@ out:
typedef struct MCEInjectionParams {
Monitor *mon;
- X86CPU *cpu;
int bank;
uint64_t status;
uint64_t mcg_status;
@@ -1122,14 +1121,14 @@ typedef struct MCEInjectionParams {
int flags;
} MCEInjectionParams;
-static void do_inject_x86_mce(void *data)
+static void do_inject_x86_mce(CPUState *cs, void *data)
{
MCEInjectionParams *params = data;
- CPUX86State *cenv = &params->cpu->env;
- CPUState *cpu = CPU(params->cpu);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *cenv = &cpu->env;
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
- cpu_synchronize_state(cpu);
+ cpu_synchronize_state(cs);
/*
* If there is an MCE exception being processed, ignore this SRAO MCE
@@ -1149,7 +1148,7 @@ static void do_inject_x86_mce(void *data)
if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled\n",
- cpu->cpu_index);
+ cs->cpu_index);
return;
}
@@ -1161,7 +1160,7 @@ static void do_inject_x86_mce(void *data)
monitor_printf(params->mon,
"CPU %d: Uncorrected error reporting disabled for"
" bank %d\n",
- cpu->cpu_index, params->bank);
+ cs->cpu_index, params->bank);
return;
}
@@ -1170,7 +1169,7 @@ static void do_inject_x86_mce(void *data)
monitor_printf(params->mon,
"CPU %d: Previous MCE still in progress, raising"
" triple fault\n",
- cpu->cpu_index);
+ cs->cpu_index);
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
qemu_system_reset_request();
return;
@@ -1182,7 +1181,7 @@ static void do_inject_x86_mce(void *data)
banks[3] = params->misc;
cenv->mcg_status = params->mcg_status;
banks[1] = params->status;
- cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
+ cpu_interrupt(cs, CPU_INTERRUPT_MCE);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL) {
@@ -1204,7 +1203,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
CPUX86State *cenv = &cpu->env;
MCEInjectionParams params = {
.mon = mon,
- .cpu = cpu,
.bank = bank,
.status = status,
.mcg_status = mcg_status,
@@ -1245,7 +1243,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
if (other_cs == cs) {
continue;
}
- params.cpu = X86_CPU(other_cs);
run_on_cpu(other_cs, do_inject_x86_mce, &params);
}
}
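Note: do_inject_x86_mce() now matches a run_on_cpu() callback type that receives the target CPUState directly, so the CPU no longer has to be smuggled through the opaque pointer (which previously had to be rewritten per CPU in the broadcast loop). Sketch of the convention (callback and MyParams illustrative):

    static void my_work(CPUState *cs, void *data)
    {
        X86CPU *cpu = X86_CPU(cs); /* target CPU arrives explicitly */
        MyParams *p = data;        /* opaque carries only parameters */
        /* ... */
    }

    /* run_on_cpu(cs, my_work, &params); */
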
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index d1a25c5465..ee1f53e569 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -83,23 +83,17 @@ static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
-static bool has_msr_async_pf_en;
-static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
-static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
-static bool has_msr_hv_vapic;
-static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
-static bool has_msr_mtrr;
static bool has_msr_xss;
static bool has_msr_architectural_pmu;
@@ -156,10 +150,8 @@ static int kvm_get_tsc(CPUState *cs)
return 0;
}
-static inline void do_kvm_synchronize_tsc(void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
{
- CPUState *cpu = arg;
-
kvm_get_tsc(cpu);
}
@@ -169,7 +161,7 @@ void kvm_synchronize_all_tsc(void)
if (kvm_enabled()) {
CPU_FOREACH(cpu) {
- run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
+ run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
}
}
}
@@ -604,20 +596,22 @@ static int hyperv_handle_properties(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ if (cpu->hyperv_time &&
+ kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
+ cpu->hyperv_time = false;
+ }
+
if (cpu->hyperv_relaxed_timing) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
}
if (cpu->hyperv_vapic) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
- has_msr_hv_vapic = true;
}
- if (cpu->hyperv_time &&
- kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+ if (cpu->hyperv_time) {
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
env->features[FEAT_HYPERV_EAX] |= 0x200;
- has_msr_hv_tsc = true;
}
if (cpu->hyperv_crash && has_msr_hv_crash) {
env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
@@ -729,7 +723,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (cpu->hyperv_relaxed_timing) {
c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
}
- if (has_msr_hv_vapic) {
+ if (cpu->hyperv_vapic) {
c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
}
c->ebx = cpu->hyperv_spinlock_attempts;
@@ -755,12 +749,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
c = &cpuid_data.entries[cpuid_i++];
c->function = KVM_CPUID_FEATURES | kvm_base;
c->eax = env->features[FEAT_KVM];
-
- has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
-
- has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
-
- has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
}
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
@@ -975,9 +963,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
- if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
- has_msr_mtrr = true;
- }
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
has_msr_tsc_aux = false;
}
@@ -1532,6 +1517,22 @@ static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
msrs->nmsrs++;
}
+static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
+{
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, index, value);
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+}
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
+{
+ int ret;
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
+ assert(ret == 1);
+}
+
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
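Note: kvm_put_one_msr() factors out the reset/add/ioctl sequence used below; KVM_SET_MSRS returns the number of MSRs actually written, which is why kvm_put_apicbase() asserts ret == 1 rather than ret == 0. Illustrative usage:

    int ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {        /* ioctl failure */
        return ret;
    }
    assert(ret == 1);     /* exactly one MSR consumed */
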
@@ -1541,10 +1542,7 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
return 0;
}
- kvm_msr_buf_reset(cpu);
- kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
-
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
if (ret < 0) {
return ret;
}
@@ -1567,11 +1565,8 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
return 0;
}
- kvm_msr_buf_reset(cpu);
- kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL,
- cpu->env.msr_ia32_feature_control);
-
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
+ cpu->env.msr_ia32_feature_control);
if (ret < 0) {
return ret;
}
@@ -1633,13 +1628,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
- if (has_msr_async_pf_en) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
- if (has_msr_pv_eoi_en) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
- if (has_msr_kvm_steal_time) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
if (has_msr_architectural_pmu) {
@@ -1675,11 +1670,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
- if (has_msr_hv_vapic) {
+ if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
- if (has_msr_hv_tsc) {
+ if (cpu->hyperv_time) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
}
if (has_msr_hv_crash) {
@@ -1725,7 +1720,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->msr_hv_stimer_count[j]);
}
}
- if (has_msr_mtrr) {
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
@@ -2042,13 +2037,13 @@ static int kvm_get_msrs(X86CPU *cpu)
#endif
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
- if (has_msr_async_pf_en) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
- if (has_msr_pv_eoi_en) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
- if (has_msr_kvm_steal_time) {
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
if (has_msr_architectural_pmu) {
@@ -2080,10 +2075,10 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
}
- if (has_msr_hv_vapic) {
+ if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
- if (has_msr_hv_tsc) {
+ if (cpu->hyperv_time) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}
if (has_msr_hv_crash) {
@@ -2115,7 +2110,7 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, msr, 0);
}
}
- if (has_msr_mtrr) {
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
@@ -2416,19 +2411,6 @@ static int kvm_get_apic(X86CPU *cpu)
return 0;
}
-static int kvm_put_apic(X86CPU *cpu)
-{
- DeviceState *apic = cpu->apic_state;
- struct kvm_lapic_state kapic;
-
- if (apic && kvm_irqchip_in_kernel()) {
- kvm_put_apic_state(apic, &kapic);
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
- }
- return 0;
-}
-
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
CPUState *cs = CPU(cpu);
@@ -2455,6 +2437,7 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
events.nmi.pad = 0;
events.sipi_vector = env->sipi_vector;
+ events.flags = 0;
if (has_msr_smbase) {
events.smi.smm = !!(env->hflags & HF_SMM_MASK);
@@ -2474,7 +2457,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
events.flags |= KVM_VCPUEVENT_VALID_SMM;
}
- events.flags = 0;
if (level >= KVM_PUT_RESET_STATE) {
events.flags |=
KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
@@ -2670,10 +2652,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
- ret = kvm_put_apic(x86_cpu);
- if (ret < 0) {
- return ret;
- }
}
ret = kvm_put_tscdeadline_msr(x86_cpu);
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index 42b00af1b1..36407e0a5d 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -41,4 +41,6 @@ int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
+
#endif
diff --git a/target-i386/monitor.c b/target-i386/monitor.c
index fccfe40ab7..9a3b4d746e 100644
--- a/target-i386/monitor.c
+++ b/target-i386/monitor.c
@@ -504,7 +504,8 @@ void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
- if (kvm_irqchip_in_kernel()) {
+ if (kvm_irqchip_in_kernel() &&
+ !kvm_irqchip_is_split()) {
kvm_ioapic_dump_state(mon, qdict);
} else {
ioapic_dump_state(mon, qdict);
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 6cbdf17426..fb79f3180d 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1137,25 +1137,27 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
int error_code, target_ulong next_eip)
{
- SegmentCache *dt;
- target_ulong ptr;
- int dpl, cpl, shift;
- uint32_t e2;
+ if (is_int) {
+ SegmentCache *dt;
+ target_ulong ptr;
+ int dpl, cpl, shift;
+ uint32_t e2;
- dt = &env->idt;
- if (env->hflags & HF_LMA_MASK) {
- shift = 4;
- } else {
- shift = 3;
- }
- ptr = dt->base + (intno << shift);
- e2 = cpu_ldl_kernel(env, ptr + 4);
+ dt = &env->idt;
+ if (env->hflags & HF_LMA_MASK) {
+ shift = 4;
+ } else {
+ shift = 3;
+ }
+ ptr = dt->base + (intno << shift);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- /* check privilege if software int */
- if (is_int && dpl < cpl) {
- raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+ }
}
/* Since we emulate only user space, we cannot do more than
diff --git a/target-i386/translate.c b/target-i386/translate.c
index fa2ac48173..9447557911 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -8012,13 +8012,21 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
break;
case 0xe8 ... 0xef: /* lfence */
+ if (!(s->cpuid_features & CPUID_SSE)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
+ break;
case 0xf0 ... 0xf7: /* mfence */
if (!(s->cpuid_features & CPUID_SSE2)
|| (prefixes & PREFIX_LOCK)) {
goto illegal_op;
}
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
break;
default:
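Note: the fence opcodes now emit explicit TCG barriers (previously they were decoded and validated but emitted nothing), and lfence gets its own legality check (SSE, no LOCK prefix) instead of falling through to mfence's SSE2 check. The mapping, summarized:

    /* sfence (0f ae /7) -> tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC) */
    /* lfence (0f ae /5) -> tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC) */
    /* mfence (0f ae /6) -> tcg_gen_mb(TCG_MO_ALL   | TCG_BAR_SC) */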