author	Avi Kivity	2010-11-29 15:12:30 +0100
committer	Avi Kivity	2011-01-12 10:30:02 +0100
commit	6389ee946303cb4313dba0a49865e495a53351ff (patch)
tree	478c8f5f4d9f75d77808d648adbc8948050ffee7	/arch/x86/kvm/x86.c
parent	KVM: Push struct x86_exception into walk_addr() (diff)
KVM: Pull extra page fault information into struct x86_exception
Currently page fault cr2 and nesting information are carried outside
the fault data structure. Instead they are placed in the vcpu struct,
which results in confusion as global variables are manipulated instead
of passing parameters. Fix this issue by adding address and nested
fields to struct x86_exception, so this struct can carry all
information associated with a fault.

Signed-off-by: Avi Kivity <avi@redhat.com>
Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Tested-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
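[Editor's note: for context, the shape of struct x86_exception after this patch can be reconstructed from the fields the diff assigns (fault.vector, fault.error_code_valid, fault.error_code, fault.nested_page_fault, fault.address). The sketch below is an approximation built from those assignments, not the authoritative definition, which lives in the KVM emulate header; field order and exact types may differ.]

	#include <linux/types.h>	/* u8, u16, u64, bool */

	/*
	 * Approximate layout of struct x86_exception as used by this
	 * patch: one stack-allocated value that carries everything a
	 * fault injection needs, replacing the old vcpu->arch.fault
	 * global-style fields.
	 */
	struct x86_exception {
		u8 vector;		/* exception vector, e.g. PF_VECTOR */
		bool error_code_valid;	/* does this exception push an error code? */
		u16 error_code;		/* page fault error code (PFERR_* bits) */
		bool nested_page_fault;	/* fault came from a nested (L2) walk */
		u64 address;		/* faulting address: cr2, or nested gpa */
	};

With this, callers such as kvm_arch_async_page_not_present() below build a fully initialized fault on the stack and pass it down, so kvm_inject_page_fault() and kvm_propagate_fault() no longer read hidden per-vcpu state.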
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	44
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a7a7decba43f..47e5a41cc40e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,23 +334,19 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
- unsigned error_code = vcpu->arch.fault.error_code;
-
++vcpu->stat.pf_guest;
- vcpu->arch.cr2 = vcpu->arch.fault.address;
- kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+ vcpu->arch.cr2 = fault->address;
+ kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
- if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
- vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+ if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+ vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
- vcpu->arch.mmu.inject_page_fault(vcpu);
-
- vcpu->arch.fault.nested = false;
+ vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3610,8 +3606,6 @@ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
- if (t_gpa == UNMAPPED_GVA)
- vcpu->arch.fault.nested = true;
return t_gpa;
}
@@ -4259,7 +4253,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
- kvm_propagate_fault(vcpu);
+ kvm_propagate_fault(vcpu, &ctxt->exception);
else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
@@ -6264,6 +6258,8 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
+ struct x86_exception fault;
+
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
@@ -6272,15 +6268,20 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
kvm_x86_ops->get_cpl(vcpu) == 0))
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
- vcpu->arch.fault.error_code = 0;
- vcpu->arch.fault.address = work->arch.token;
- kvm_inject_page_fault(vcpu);
+ fault.vector = PF_VECTOR;
+ fault.error_code_valid = true;
+ fault.error_code = 0;
+ fault.nested_page_fault = false;
+ fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu, &fault);
}
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
+ struct x86_exception fault;
+
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (is_error_page(work->page))
work->arch.token = ~0; /* broadcast wakeup */
@@ -6289,9 +6290,12 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
- vcpu->arch.fault.error_code = 0;
- vcpu->arch.fault.address = work->arch.token;
- kvm_inject_page_fault(vcpu);
+ fault.vector = PF_VECTOR;
+ fault.error_code_valid = true;
+ fault.error_code = 0;
+ fault.nested_page_fault = false;
+ fault.address = work->arch.token;
+ kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
}