author     Paolo Bonzini    2015-04-22 11:40:41 +0200
committer  Paolo Bonzini    2015-06-05 17:10:01 +0200
commit     9982f74bad70479939491b69522da047a3be5a0d (patch)
tree       419ce637c765afe938c462f48ed42a085fb2cb0a /target-i386
parent     target-i386: Use correct memory attributes for ioport accesses (diff)
target-i386: mask NMIs on entry to SMM
QEMU does not block NMIs on entry to SMM. Implementing this has to
cover a few corner cases, because:
- NMIs can then be enabled by an IRET instruction and there
is no mechanism to _set_ the "NMIs masked" flag on exit from SMM:
"A special case can occur if an SMI handler nests inside an NMI handler
and then another NMI occurs. [...] When the processor enters SMM while
executing an NMI handler, the processor saves the SMRAM state save map
but does not save the attribute to keep NMI interrupts disabled.
- However, there is some hidden state, because "If NMIs were blocked
before the SMI occurred [and no IRET is executed while in SMM], they
are blocked after execution of RSM." This is represented by the new
HF2_SMM_INSIDE_NMI_MASK bit. If it is zero, NMIs are _unblocked_
on exit from RSM.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target-i386')
-rw-r--r--   target-i386/cpu.h        | 20
-rw-r--r--   target-i386/smm_helper.c |  9
2 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 7a06495834..9dae9ab854 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -180,15 +180,17 @@
 /* hflags2 */
-#define HF2_GIF_SHIFT    0 /* if set CPU takes interrupts */
-#define HF2_HIF_SHIFT    1 /* value of IF_MASK when entering SVM */
-#define HF2_NMI_SHIFT    2 /* CPU serving NMI */
-#define HF2_VINTR_SHIFT  3 /* value of V_INTR_MASKING bit */
-
-#define HF2_GIF_MASK     (1 << HF2_GIF_SHIFT)
-#define HF2_HIF_MASK     (1 << HF2_HIF_SHIFT)
-#define HF2_NMI_MASK     (1 << HF2_NMI_SHIFT)
-#define HF2_VINTR_MASK   (1 << HF2_VINTR_SHIFT)
+#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
+#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+
+#define HF2_GIF_MASK             (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK             (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK             (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK           (1 << HF2_VINTR_SHIFT)
+#define HF2_SMM_INSIDE_NMI_MASK  (1 << HF2_SMM_INSIDE_NMI_SHIFT)
 
 #define CR0_PE_SHIFT 0
 #define CR0_MP_SHIFT 1

diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index b9971b6e19..6207c3a143 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -52,6 +52,11 @@ void do_smm_enter(X86CPU *cpu)
     log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
 
     env->hflags |= HF_SMM_MASK;
+    if (env->hflags2 & HF2_NMI_MASK) {
+        env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+    } else {
+        env->hflags2 |= HF2_NMI_MASK;
+    }
     cpu_smm_update(env);
 
     sm_state = env->smbase + 0x8000;
@@ -307,6 +312,10 @@ void helper_rsm(CPUX86State *env)
         env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8) & ~0x7fff;
     }
 #endif
+    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
+        env->hflags2 &= ~HF2_NMI_MASK;
+    }
+    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
     env->hflags &= ~HF_SMM_MASK;
     cpu_smm_update(env);
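
To make the masking rules above concrete, here is a small standalone model of the
hflags2 transitions. The HF2_* bit values and the smm_enter()/rsm() logic mirror the
patch; the ModelCPU struct, the iret() helper (which only models the "NMIs can then
be enabled by an IRET instruction" behaviour quoted in the message) and main() are
illustrative assumptions, not QEMU code.

/* Standalone sketch of the NMI-masking rules described in the commit message.
 * Only the HF2_* values and the smm_enter()/rsm() bodies come from the patch;
 * the rest is an illustrative model, not QEMU code. */
#include <stdio.h>

#define HF2_NMI_MASK            (1 << 2) /* NMIs currently blocked */
#define HF2_SMM_INSIDE_NMI_MASK (1 << 4) /* SMI taken while serving an NMI */

typedef struct {
    unsigned hflags2;
} ModelCPU;

/* Entry to SMM: block NMIs; if they were already blocked, remember that
 * the SMI nested inside an NMI handler. */
static void smm_enter(ModelCPU *cpu)
{
    if (cpu->hflags2 & HF2_NMI_MASK) {
        cpu->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
    } else {
        cpu->hflags2 |= HF2_NMI_MASK;
    }
}

/* IRET executed inside the SMI handler: NMIs become unblocked, and there is
 * no architectural way to block them again before RSM. */
static void iret(ModelCPU *cpu)
{
    cpu->hflags2 &= ~HF2_NMI_MASK;
}

/* RSM: leave NMIs blocked only if they were blocked when the SMI arrived
 * (the hidden bit is set) -- otherwise unblock them. */
static void rsm(ModelCPU *cpu)
{
    if ((cpu->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
        cpu->hflags2 &= ~HF2_NMI_MASK;
    }
    cpu->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
}

static void report(const char *when, const ModelCPU *cpu)
{
    printf("%-32s NMIs %s\n", when,
           (cpu->hflags2 & HF2_NMI_MASK) ? "blocked" : "unblocked");
}

int main(void)
{
    ModelCPU cpu = { .hflags2 = 0 };

    /* Case 1: SMI taken outside any NMI handler. */
    smm_enter(&cpu);
    report("SMI, no NMI in service:", &cpu);      /* blocked while in SMM */
    rsm(&cpu);
    report("after RSM:", &cpu);                   /* unblocked again */

    /* Case 2: SMI nested inside an NMI handler, no IRET while in SMM. */
    cpu.hflags2 = HF2_NMI_MASK;                   /* NMI handler running */
    smm_enter(&cpu);
    rsm(&cpu);
    report("nested SMI, after RSM:", &cpu);       /* still blocked */

    /* Case 3: nested SMI, but the SMI handler executes an IRET. */
    cpu.hflags2 = HF2_NMI_MASK;
    smm_enter(&cpu);
    iret(&cpu);                                   /* re-enables NMIs */
    rsm(&cpu);
    report("nested SMI + IRET, after RSM:", &cpu);/* unblocked */

    return 0;
}

The nesting attribute has to live in hidden CPU state (hflags2) rather than in the
SMRAM state save map precisely because, per the SDM passage quoted above, the
processor does not save it there on SMM entry.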