author      Peter Maydell    2019-08-21 10:00:49 +0200
committer   Peter Maydell    2019-08-21 10:00:49 +0200
commit      f2cfa1229e539ee1bb1822912075cf25538ad6b9 (patch)
tree        b0d325ec6f68410fd19615af2c47becdb365af07 /target/i386/kvm.c
parent      Merge remote-tracking branch 'remotes/huth-gitlab/tags/pull-request-2019-08-2... (diff)
parent      x86: Intel AVX512_BF16 feature enabling (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* New KVM PV features (Marcelo, Wanpeng)
* valgrind fixes (Andrey)
* Remove clock reset notifiers (David)
* KConfig and Makefile cleanups (Paolo)
* Replay and icount improvements (Pavel)
* x86 FP fixes (Peter M.)
* TCG locking assertions (Roman)
* x86 support for mmap-ed -kernel/-initrd (Stefano)
* Other cleanups (Wei Yang, Yan Zhao, Tony)
* LSI fix for infinite loop (Prasad)
* ARM migration fix (Catherine)
* AVX512_BF16 feature (Jing)
# gpg: Signature made Tue 20 Aug 2019 19:00:54 BST
# gpg: using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream: (33 commits)
x86: Intel AVX512_BF16 feature enabling
scsi: lsi: exit infinite loop while executing script (CVE-2019-12068)
test-bitmap: test set 1 bit case for bitmap_set
migration: do not rom_reset() during incoming migration
HACKING: Document 'struct' keyword usage
kvm: vmxcap: Enhance with latest features
cpus-common: nuke finish_safe_work
icount: remove unnecessary gen_io_end calls
icount: clean up cpu_can_io at the entry to the block
replay: rename step-related variables and functions
replay: refine replay-time module
replay: fix replay shutdown
util/qemu-timer: refactor deadline calculation for external timers
replay: document development rules
replay: add missing fix for internal function
timer: last, remove last bits of last
replay: Remove host_clock_last
timer: Remove reset notifiers
mc146818rtc: Remove reset notifiers
memory: fix race between TCG and accesses to dirty bitmap
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/i386/kvm.c')
-rw-r--r--   target/i386/kvm.c   205
1 file changed, 110 insertions, 95 deletions
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 2abc881324..8023c679ea 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -193,6 +193,7 @@ static int kvm_get_tsc(CPUState *cs)
         return 0;
     }
 
+    memset(&msr_data, 0, sizeof(msr_data));
     msr_data.info.nmsrs = 1;
     msr_data.entries[0].index = MSR_IA32_TSC;
     env->tsc_valid = !runstate_is_running();
@@ -1500,6 +1501,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
                 c = &cpuid_data.entries[cpuid_i++];
             }
             break;
+        case 0x7:
         case 0x14: {
             uint32_t times;
 
@@ -1512,7 +1514,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
             for (j = 1; j <= times; ++j) {
                 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                     fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:0x14,ecx:0x%x)\n", j);
+                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                     abort();
                 }
                 c = &cpuid_data.entries[cpuid_i++];
@@ -1709,6 +1711,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     if (has_xsave) {
         env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
+        memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
     }
 
     max_nested_state_len = kvm_max_nested_state_length();
@@ -1785,6 +1788,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
         hyperv_x86_synic_reset(cpu);
     }
+    /* enabled by default */
+    env->poll_control_msr = 1;
 }
 
 void kvm_arch_do_init_vcpu(X86CPU *cpu)
@@ -1840,108 +1845,105 @@ static int kvm_get_supported_feature_msrs(KVMState *s)
 
 static int kvm_get_supported_msrs(KVMState *s)
 {
-    static int kvm_supported_msrs;
     int ret = 0;
+    struct kvm_msr_list msr_list, *kvm_msr_list;
 
-    /* first time */
-    if (kvm_supported_msrs == 0) {
-        struct kvm_msr_list msr_list, *kvm_msr_list;
+    /*
+     * Obtain MSR list from KVM. These are the MSRs that we must
+     * save/restore.
+     */
+    msr_list.nmsrs = 0;
+    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
+    if (ret < 0 && ret != -E2BIG) {
+        return ret;
+    }
+    /*
+     * Old kernel modules had a bug and could write beyond the provided
+     * memory. Allocate at least a safe amount of 1K.
+     */
+    kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
+                                       msr_list.nmsrs *
+                                       sizeof(msr_list.indices[0])));
 
-        kvm_supported_msrs = -1;
+    kvm_msr_list->nmsrs = msr_list.nmsrs;
+    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
+    if (ret >= 0) {
+        int i;
 
-        /* Obtain MSR list from KVM. These are the MSRs that we must
-         * save/restore */
-        msr_list.nmsrs = 0;
-        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
-        if (ret < 0 && ret != -E2BIG) {
-            return ret;
-        }
-        /* Old kernel modules had a bug and could write beyond the provided
-           memory. Allocate at least a safe amount of 1K. */
-        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
-                                              msr_list.nmsrs *
-                                              sizeof(msr_list.indices[0])));
-
-        kvm_msr_list->nmsrs = msr_list.nmsrs;
-        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
-        if (ret >= 0) {
-            int i;
-
-            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
-                switch (kvm_msr_list->indices[i]) {
-                case MSR_STAR:
-                    has_msr_star = true;
-                    break;
-                case MSR_VM_HSAVE_PA:
-                    has_msr_hsave_pa = true;
-                    break;
-                case MSR_TSC_AUX:
-                    has_msr_tsc_aux = true;
-                    break;
-                case MSR_TSC_ADJUST:
-                    has_msr_tsc_adjust = true;
-                    break;
-                case MSR_IA32_TSCDEADLINE:
-                    has_msr_tsc_deadline = true;
-                    break;
-                case MSR_IA32_SMBASE:
-                    has_msr_smbase = true;
-                    break;
-                case MSR_SMI_COUNT:
-                    has_msr_smi_count = true;
-                    break;
-                case MSR_IA32_MISC_ENABLE:
-                    has_msr_misc_enable = true;
-                    break;
-                case MSR_IA32_BNDCFGS:
-                    has_msr_bndcfgs = true;
-                    break;
-                case MSR_IA32_XSS:
-                    has_msr_xss = true;
-                    break;
-                case HV_X64_MSR_CRASH_CTL:
-                    has_msr_hv_crash = true;
-                    break;
-                case HV_X64_MSR_RESET:
-                    has_msr_hv_reset = true;
-                    break;
-                case HV_X64_MSR_VP_INDEX:
-                    has_msr_hv_vpindex = true;
-                    break;
-                case HV_X64_MSR_VP_RUNTIME:
-                    has_msr_hv_runtime = true;
-                    break;
-                case HV_X64_MSR_SCONTROL:
-                    has_msr_hv_synic = true;
-                    break;
-                case HV_X64_MSR_STIMER0_CONFIG:
-                    has_msr_hv_stimer = true;
-                    break;
-                case HV_X64_MSR_TSC_FREQUENCY:
-                    has_msr_hv_frequencies = true;
-                    break;
-                case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
-                    has_msr_hv_reenlightenment = true;
-                    break;
-                case MSR_IA32_SPEC_CTRL:
-                    has_msr_spec_ctrl = true;
-                    break;
-                case MSR_VIRT_SSBD:
-                    has_msr_virt_ssbd = true;
-                    break;
-                case MSR_IA32_ARCH_CAPABILITIES:
-                    has_msr_arch_capabs = true;
-                    break;
-                case MSR_IA32_CORE_CAPABILITY:
-                    has_msr_core_capabs = true;
-                    break;
-                }
+        for (i = 0; i < kvm_msr_list->nmsrs; i++) {
+            switch (kvm_msr_list->indices[i]) {
+            case MSR_STAR:
+                has_msr_star = true;
+                break;
+            case MSR_VM_HSAVE_PA:
+                has_msr_hsave_pa = true;
+                break;
+            case MSR_TSC_AUX:
+                has_msr_tsc_aux = true;
+                break;
+            case MSR_TSC_ADJUST:
+                has_msr_tsc_adjust = true;
+                break;
+            case MSR_IA32_TSCDEADLINE:
+                has_msr_tsc_deadline = true;
+                break;
+            case MSR_IA32_SMBASE:
+                has_msr_smbase = true;
+                break;
+            case MSR_SMI_COUNT:
+                has_msr_smi_count = true;
+                break;
+            case MSR_IA32_MISC_ENABLE:
+                has_msr_misc_enable = true;
+                break;
+            case MSR_IA32_BNDCFGS:
+                has_msr_bndcfgs = true;
+                break;
+            case MSR_IA32_XSS:
+                has_msr_xss = true;
+                break;
+            case HV_X64_MSR_CRASH_CTL:
+                has_msr_hv_crash = true;
+                break;
+            case HV_X64_MSR_RESET:
+                has_msr_hv_reset = true;
+                break;
+            case HV_X64_MSR_VP_INDEX:
+                has_msr_hv_vpindex = true;
+                break;
+            case HV_X64_MSR_VP_RUNTIME:
+                has_msr_hv_runtime = true;
+                break;
+            case HV_X64_MSR_SCONTROL:
+                has_msr_hv_synic = true;
+                break;
+            case HV_X64_MSR_STIMER0_CONFIG:
+                has_msr_hv_stimer = true;
+                break;
+            case HV_X64_MSR_TSC_FREQUENCY:
+                has_msr_hv_frequencies = true;
+                break;
+            case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+                has_msr_hv_reenlightenment = true;
+                break;
+            case MSR_IA32_SPEC_CTRL:
+                has_msr_spec_ctrl = true;
+                break;
+            case MSR_VIRT_SSBD:
+                has_msr_virt_ssbd = true;
+                break;
+            case MSR_IA32_ARCH_CAPABILITIES:
+                has_msr_arch_capabs = true;
+                break;
+            case MSR_IA32_CORE_CAPABILITY:
+                has_msr_core_capabs = true;
+                break;
             }
         }
-
-        g_free(kvm_msr_list);
     }
 
+    g_free(kvm_msr_list);
+
     return ret;
 }
@@ -2493,6 +2495,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
             kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
         }
+
+        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+            kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
+        }
+
         if (has_architectural_pmu_version > 0) {
             if (has_architectural_pmu_version > 1) {
                 /* Stop the counter. */
@@ -2878,6 +2885,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
         kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
     }
+    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
+    }
     if (has_architectural_pmu_version > 0) {
         if (has_architectural_pmu_version > 1) {
             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
@@ -3112,6 +3122,10 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
+        case MSR_KVM_POLL_CONTROL: {
+            env->poll_control_msr = msrs[i].data;
+            break;
+        }
         case MSR_CORE_PERF_FIXED_CTR_CTRL:
             env->msr_fixed_ctr_ctrl = msrs[i].data;
             break;
@@ -3480,6 +3494,7 @@ static int kvm_put_debugregs(X86CPU *cpu)
         return 0;
     }
 
+    memset(&dbgregs, 0, sizeof(dbgregs));
     for (i = 0; i < 4; i++) {
         dbgregs.db[i] = env->dr[i];
     }
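Editor's note on the poll-control hunks above: the new MSR follows the usual pattern for KVM paravirtual MSRs in this file — reset enables it by default, kvm_put_msrs saves it only when the guest's FEAT_KVM bit advertises the feature, and kvm_get_msrs reads the kernel's value back into CPUX86State. The sketch below is a simplified, self-contained illustration of that save/restore flow, not QEMU code: the struct, helper names, feature-bit position, and MSR index constant are hypothetical stand-ins chosen for this sketch.

/* Simplified sketch of the conditional MSR save/restore pattern.
 * All names and numeric values here are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define FEATURE_POLL_CONTROL_BIT 12u          /* bit position assumed for this sketch */
#define SKETCH_MSR_POLL_CONTROL  0x1234u      /* hypothetical MSR index for this sketch */

struct msr_entry { uint32_t index; uint64_t data; };

struct vcpu_state {
    uint32_t kvm_features;                    /* plays the role of env->features[FEAT_KVM] */
    uint64_t poll_control_msr;                /* plays the role of env->poll_control_msr */
};

/* "put" direction: append the MSR to the buffer only if the guest has the feature */
static int put_msrs(const struct vcpu_state *s, struct msr_entry *buf, int n)
{
    if (s->kvm_features & (1u << FEATURE_POLL_CONTROL_BIT)) {
        buf[n].index = SKETCH_MSR_POLL_CONTROL;
        buf[n].data = s->poll_control_msr;
        n++;
    }
    return n;
}

/* "get" direction: copy whatever the kernel returned back into the vCPU state */
static void get_msrs(struct vcpu_state *s, const struct msr_entry *buf, int n)
{
    for (int i = 0; i < n; i++) {
        if (buf[i].index == SKETCH_MSR_POLL_CONTROL) {
            s->poll_control_msr = buf[i].data;
        }
    }
}

int main(void)
{
    struct vcpu_state s = {
        .kvm_features = 1u << FEATURE_POLL_CONTROL_BIT,
        .poll_control_msr = 1,                /* "enabled by default", as on vCPU reset */
    };
    struct msr_entry buf[8];

    int n = put_msrs(&s, buf, 0);             /* save: 1 entry, data == 1 */
    buf[0].data = 0;                          /* pretend the guest disabled halt polling */
    get_msrs(&s, buf, n);                     /* restore the value read from the kernel */

    printf("poll_control_msr = %llu\n", (unsigned long long)s.poll_control_msr);
    return 0;
}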