author    David Matlack       2016-12-21 00:25:57 +0100
committer Radim Krčmář        2017-01-09 14:46:03 +0100
commit    f3414bc77419463c0d81eaa2cea7ee4ccb447c7d
tree      153185d45ad2105c030648945e650687fe3adfec
parent    KVM: x86: simplify conditions with split/kernel irqchip
kvm: x86: export maximum number of mmu_page_hash collisions
Report the maximum number of mmu_page_hash collisions as a per-VM stat.
This will make it easy to identify problems with the mmu_page_hash in
the future.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
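Conceptually, the mmu.c change below counts, on each shadow-page lookup,
how many pages share the target hash bucket without matching the target
gfn, and folds that into a running per-VM maximum. A minimal standalone
C sketch of that pattern, assuming a simplified chained hash table (the
names entry, table, and lookup are hypothetical stand-ins for
kvm_mmu_page, mmu_page_hash, and kvm_mmu_get_page; this is not KVM
code):

#define NBUCKETS 4096

struct entry {
	unsigned long key;		/* stand-in for sp->gfn */
	struct entry *next;		/* bucket chain, like sp->hash_link */
};

struct table {
	struct entry *buckets[NBUCKETS];
	int max_collisions;		/* stand-in for stat.max_mmu_page_hash_collisions */
};

static struct entry *lookup(struct table *t, unsigned long key)
{
	struct entry *e = t->buckets[key % NBUCKETS];
	int collisions = 0;

	for (; e; e = e->next) {
		if (e->key != key) {
			collisions++;	/* same bucket, different key */
			continue;
		}
		break;			/* cache hit */
	}

	/* Track the longest collision chain seen, hit or miss, just as
	 * the patch does at its "out:" label. */
	if (collisions > t->max_collisions)
		t->max_collisions = collisions;

	return e;			/* NULL on a miss */
}

A caller that sees t->max_collisions grow large relative to the number
of entries knows the hash function or table size is misbehaving, which
is exactly the diagnostic the commit message describes.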
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7012de4a1fed..45ee7ae88239 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1904,17 +1904,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
* since it has been deleted from active_mmu_pages but still can be found
* at hash list.
*
- * for_each_gfn_valid_sp() has skipped that kind of pages.
+ * for_each_valid_sp() has skipped that kind of pages.
*/
-#define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
+#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
- if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \
- || (_sp)->role.invalid) {} else
+ if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
+ } else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
- for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
- if ((_sp)->role.direct) {} else
+ for_each_valid_sp(_kvm, _sp, _gfn) \
+ if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -2116,6 +2116,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp;
bool need_sync = false;
bool flush = false;
+ int collisions = 0;
LIST_HEAD(invalid_list);

role = vcpu->arch.mmu.base_role;
@@ -2130,7 +2131,12 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) {
+ for_each_valid_sp(vcpu->kvm, sp, gfn) {
+ if (sp->gfn != gfn) {
+ collisions++;
+ continue;
+ }
+
if (!need_sync && sp->unsync)
need_sync = true;

@@ -2153,7 +2159,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,

__clear_sp_write_flooding_count(sp);
trace_kvm_mmu_get_page(sp, false);
- return sp;
+ goto out;
}

++vcpu->kvm->stat.mmu_cache_miss;
@@ -2183,6 +2189,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
trace_kvm_mmu_get_page(sp, true);

kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+out:
+ if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
+ vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
}
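A note on the design: the old for_each_gfn_valid_sp() filtered out
entries with a non-matching gfn inside the iterator itself, so
kvm_mmu_get_page() could never observe colliding entries. The patch
renames the iterator to for_each_valid_sp() and moves the gfn check to
its users: for_each_gfn_indirect_valid_sp() re-adds the check inline,
while kvm_mmu_get_page() performs it by hand so it can count entries
that merely share a hash bucket. The max_mmu_page_hash_collisions
counter updated at the out: label is declared outside mmu.c (this
diffstat is limited to mmu.c); presumably it is wired up like KVM's
other per-VM stats so it can be read from userspace.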