author    Junaid Shahid    2016-12-07 01:46:14 +0100
committer Radim Krčmář     2017-01-09 14:46:09 +0100
commit    f39a058d0ea2f58b9c69cfcf7c93184f33302c98 (patch)
tree      d0c4c29051d64f1999d0caced137136ce8a92ef7 /arch/x86/kvm/mmu.c
parent    kvm: x86: mmu: Refactor accessed/dirty checks in mmu_spte_update/clear (diff)
kvm: x86: mmu: Introduce a no-tracking version of mmu_spte_update
mmu_spte_update() tracks changes in the accessed/dirty state of the SPTE being
updated and calls kvm_set_pfn_accessed/dirty appropriately. However, in some
cases (e.g. when aging the SPTE), this shouldn't be done.
mmu_spte_update_no_track() is introduced for use in such cases.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
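For context, a minimal sketch (not part of this patch) of the kind of caller the
commit message has in mind: an aging path clears the accessed bit of a present
SPTE but should not report the page as accessed via kvm_set_pfn_accessed(),
which is exactly what mmu_spte_update_no_track() allows. The helper name
example_age_spte() is hypothetical; mmu_spte_update_no_track(),
is_shadow_present_pte() and shadow_accessed_mask come from mmu.c, and the
sketch assumes the accessed bit is tracked in hardware A/D style.

/*
 * Hypothetical sketch: drop the accessed bit of a present SPTE without
 * reporting the page as accessed, the case mmu_spte_update_no_track()
 * exists for.
 */
static void example_age_spte(u64 *sptep)
{
	u64 spte = *sptep;

	if (is_shadow_present_pte(spte) && (spte & shadow_accessed_mask))
		mmu_spte_update_no_track(sptep, spte & ~shadow_accessed_mask);
}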
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c   40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cfef95969335..b8b5259c8ebb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -528,27 +528,19 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 	__set_spte(sptep, new_spte);
 }
 
-/* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changed.
- *
- * Whenever we overwrite a writable spte with a read-only one we
- * should flush remote TLBs. Otherwise rmap_write_protect
- * will find a read-only spte, even though the writable spte
- * might be cached on a CPU's TLB, the return value indicates this
- * case.
- *
- * Returns true if the TLB needs to be flushed
+/*
+ * Update the SPTE (excluding the PFN), but do not track changes in its
+ * accessed/dirty status.
  */
-static bool mmu_spte_update(u64 *sptep, u64 new_spte)
+static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
 {
 	u64 old_spte = *sptep;
-	bool flush = false;
 
 	WARN_ON(!is_shadow_present_pte(new_spte));
 
 	if (!is_shadow_present_pte(old_spte)) {
 		mmu_spte_set(sptep, new_spte);
-		return flush;
+		return old_spte;
 	}
 
 	if (!spte_has_volatile_bits(old_spte))
@@ -558,6 +550,28 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 
 	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
 
+	return old_spte;
+}
+
+/* Rules for using mmu_spte_update:
+ * Update the state bits, it means the mapped pfn is not changed.
+ *
+ * Whenever we overwrite a writable spte with a read-only one we
+ * should flush remote TLBs. Otherwise rmap_write_protect
+ * will find a read-only spte, even though the writable spte
+ * might be cached on a CPU's TLB, the return value indicates this
+ * case.
+ *
+ * Returns true if the TLB needs to be flushed
+ */
+static bool mmu_spte_update(u64 *sptep, u64 new_spte)
+{
+	bool flush = false;
+	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
+
+	if (!is_shadow_present_pte(old_spte))
+		return false;
+
 	/*
 	 * For the spte updated out of mmu-lock is safe, since
 	 * we always atomically update it, see the comments in