summary refs log tree commit diff stats
path: root/arch/x86/kvm/mmu.h
diff options
context:
space:
mode:
author Marcelo Tosatti 2013-03-13 02:36:43 +0100
committer Gleb Natapov 2013-03-13 10:46:09 +0100
commit 5d218814328da91a27e982748443e7e375e11396 (patch)
tree 46ef91eb8d845cea7500450fb041b9e5e444e63f /arch/x86/kvm/mmu.h
parent KVM: x86: Drop unused return code from VCPU reset callback (diff)
download kernel-qcow2-linux-5d218814328da91a27e982748443e7e375e11396.tar.gz
kernel-qcow2-linux-5d218814328da91a27e982748443e7e375e11396.tar.xz
kernel-qcow2-linux-5d218814328da91a27e982748443e7e375e11396.zip
KVM: MMU: make kvm_mmu_available_pages robust against n_used_mmu_pages > n_max_mmu_pages
As noticed by Ulrich Obergfell <uobergfe@redhat.com>, the mmu counters are for beancounting purposes only - so n_used_mmu_pages and n_max_mmu_pages could be relaxed (example: before f0f5933a1626c8df7b), resulting in n_used_mmu_pages > n_max_mmu_pages. Make code robust against n_used_mmu_pages > n_max_mmu_pages. Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.h')
-rw-r--r-- arch/x86/kvm/mmu.h | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 69871080e866..3b1ad0049ea4 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -57,8 +57,11 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
- return kvm->arch.n_max_mmu_pages -
- kvm->arch.n_used_mmu_pages;
+ if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
+ return kvm->arch.n_max_mmu_pages -
+ kvm->arch.n_used_mmu_pages;
+
+ return 0;
}
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)