author     Zeng Guang      2022-02-17 07:04:33 +0100
committer  Paolo Bonzini   2022-03-15 11:50:50 +0100
commit     cdec2b753b487d9e8aab028231c35d87789ea083 (patch)
tree       dfa03fe16915fbf6d80ea43012a502e9d1de17d7 /target/i386
parent     x86: add support for KVM_CAP_XSAVE2 and AMX state migration (diff)
x86: Support XFD and AMX xsave data migration
XFD (eXtended Feature Disable) allows a feature to be enabled in the xsave
state while specific user threads are prevented from using it. Save and
restore the XFD MSRs when CPUID.D.1.EAX[4] enumerates them as valid, and
migrate the MSRs together with the related xsave state as needed.

Signed-off-by: Zeng Guang <guang.zeng@intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Yang Zhong <yang.zhong@intel.com>
Message-Id: <20220217060434.52460-8-yang.zhong@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
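For context (not part of the patch): a minimal, self-contained C probe that reads
the same enumeration bit, CPUID.(EAX=0DH,ECX=1):EAX[4], which the patch exposes as
CPUID_D_1_EAX_XFD and tests via env->features[FEAT_XSAVE]. This is only an
illustrative sketch; it assumes an x86 host and a GCC/Clang toolchain that
provides <cpuid.h>.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* CPUID leaf 0xD, sub-leaf 1: extended XSAVE feature enumeration. */
    if (!__get_cpuid_count(0xD, 1, &eax, &ebx, &ecx, &edx)) {
        printf("CPUID leaf 0xD is not supported on this CPU\n");
        return 1;
    }

    /* Bit 4 of EAX is the bit the patch names CPUID_D_1_EAX_XFD. */
    printf("XFD supported: %s\n", (eax & (1U << 4)) ? "yes" : "no");
    return 0;
}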
Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/cpu.h     |  9
-rw-r--r--  target/i386/kvm/kvm.c | 18
-rw-r--r--  target/i386/machine.c | 46
3 files changed, 73 insertions(+), 0 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index d803cd74b1..5e406088a9 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -507,6 +507,9 @@ typedef enum X86Seg {
#define MSR_VM_HSAVE_PA 0xc0010117
+#define MSR_IA32_XFD 0x000001c4
+#define MSR_IA32_XFD_ERR 0x000001c5
+
#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_UMWAIT_CONTROL 0xe1
@@ -872,6 +875,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* XFD Extend Feature Disabled */
+#define CPUID_D_1_EAX_XFD (1U << 4)
/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP (1U << 31)
@@ -1616,6 +1621,10 @@ typedef struct CPUArchState {
uint64_t msr_rtit_cr3_match;
uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
+ /* Per-VCPU XFD MSRs */
+ uint64_t msr_xfd;
+ uint64_t msr_xfd_err;
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 4e871a648a..ef2c68a6f4 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -3277,6 +3277,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->msr_ia32_sgxlepubkeyhash[3]);
}
+ if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XFD,
+ env->msr_xfd);
+ kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
+ env->msr_xfd_err);
+ }
+
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
}
@@ -3669,6 +3676,11 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
}
+ if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
+ }
+
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
@@ -3965,6 +3977,12 @@ static int kvm_get_msrs(X86CPU *cpu)
env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
msrs[i].data;
break;
+ case MSR_IA32_XFD:
+ env->msr_xfd = msrs[i].data;
+ break;
+ case MSR_IA32_XFD_ERR:
+ env->msr_xfd_err = msrs[i].data;
+ break;
}
}
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 6202f47793..7c54bada81 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1483,6 +1483,48 @@ static const VMStateDescription vmstate_pdptrs = {
}
};
+static bool xfd_msrs_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD);
+}
+
+static const VMStateDescription vmstate_msr_xfd = {
+ .name = "cpu/msr_xfd",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xfd_msrs_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_xfd, X86CPU),
+ VMSTATE_UINT64(env.msr_xfd_err, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#ifdef TARGET_X86_64
+static bool amx_xtile_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE);
+}
+
+static const VMStateDescription vmstate_amx_xtile = {
+ .name = "cpu/intel_amx_xtile",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = amx_xtile_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64),
+ VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
const VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -1622,6 +1664,10 @@ const VMStateDescription vmstate_x86_cpu = {
&vmstate_msr_tsx_ctrl,
&vmstate_msr_intel_sgx,
&vmstate_pdptrs,
+ &vmstate_msr_xfd,
+#ifdef TARGET_X86_64
+ &vmstate_amx_xtile,
+#endif
NULL
}
};