path: root/arch/powerpc/kvm/book3s_64_mmu_hv.c
author     Paul Mackerras          2011-12-12 13:28:55 +0100
committer  Avi Kivity              2012-03-05 13:52:36 +0100
commit     93e602490c1da83162a8b6ba86b4b48a7a0f0c9e (patch)
tree       7dd0407af03944cffcfdb7798d0ea6155156700e /arch/powerpc/kvm/book3s_64_mmu_hv.c
parent     KVM: PPC: Keep page physical addresses in per-slot arrays (diff)
KVM: PPC: Add an interface for pinning guest pages in Book3s HV guests
This adds two new functions, kvmppc_pin_guest_page() and kvmppc_unpin_guest_page(), and uses them to pin the guest pages where the guest has registered areas of memory for the hypervisor to update (i.e. the per-cpu virtual processor areas, SLB shadow buffers and dispatch trace logs), and then unpin them when they are no longer required. Although it is not strictly necessary to pin the pages at this point, since all guest pages are already pinned, later commits in this series will mean that guest pages aren't all pinned.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
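Below is a minimal usage sketch of the new pair, purely for illustration and not part of this commit: the wrapper function, its name and the length check are hypothetical; only the kvmppc_pin_guest_page()/kvmppc_unpin_guest_page() calls mirror the interface added in the diff.

/*
 * Illustrative sketch only, not code from this commit.  Assumes KVM
 * Book3S HV kernel context.  Pin the guest page backing an area the
 * guest registered at gpa, update it, then drop the reference again.
 */
static int example_update_guest_area(struct kvm *kvm, unsigned long gpa,
				     unsigned long len)
{
	unsigned long nb;
	void *va;

	va = kvmppc_pin_guest_page(kvm, gpa, &nb);
	if (!va)
		return -EFAULT;		/* gpa not backed by a resident page */
	if (nb < len) {
		/* registered area must not cross the end of the guest page */
		kvmppc_unpin_guest_page(kvm, va);
		return -EINVAL;
	}
	memset(va, 0, len);		/* stand-in for the real hypervisor update */
	kvmppc_unpin_guest_page(kvm, va);	/* releases the get_page() reference */
	return 0;
}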
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  |  38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index e4c60698f41a..dcd39dc64f07 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -184,6 +184,44 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return -ENOENT;
}
+void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
+ unsigned long *nb_ret)
+{
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ struct page *page;
+ unsigned long offset;
+ unsigned long pfn, pa;
+ unsigned long *physp;
+
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return NULL;
+ physp = kvm->arch.slot_phys[memslot->id];
+ if (!physp)
+ return NULL;
+ physp += (gfn - memslot->base_gfn) >>
+ (kvm->arch.ram_porder - PAGE_SHIFT);
+ pa = *physp;
+ if (!pa)
+ return NULL;
+ pfn = pa >> PAGE_SHIFT;
+ page = pfn_to_page(pfn);
+ get_page(page);
+ offset = gpa & (kvm->arch.ram_psize - 1);
+ if (nb_ret)
+ *nb_ret = kvm->arch.ram_psize - offset;
+ return page_address(page) + offset;
+}
+
+void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
+{
+ struct page *page = virt_to_page(va);
+
+ page = compound_head(page);
+ put_page(page);
+}
+
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
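A worked example of the indexing and offset arithmetic in kvmppc_pin_guest_page() above; the page sizes here are illustrative assumptions, not values taken from this commit. With 4 KB host pages (PAGE_SHIFT = 12), guest RAM backed by 16 MB pages (kvm->arch.ram_porder = 24, ram_psize = 1 << 24) and a memslot whose base_gfn is 0, a gpa of 0x01234567 gives:

	gfn     = gpa >> PAGE_SHIFT             = 0x1234
	index   = (gfn - base_gfn) >> (24 - 12) = 0x1       (entry in slot_phys[memslot->id])
	offset  = gpa & (ram_psize - 1)         = 0x234567
	*nb_ret = ram_psize - offset            = 0xdcba99  (bytes left in the 16 MB page)

The function then reads the physical address stored at that entry, takes a reference on the corresponding struct page with get_page(), and returns page_address() of that page plus the 0x234567 offset.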