author | Thomas Huth | 2017-02-15 10:21:44 +0100 |
---|---|---|
committer | David Gibson | 2017-02-22 04:28:53 +0100 |
commit | df58713396f8b2deb923e39c00b10744c5c63909 (patch) | |
tree | cec2364995459c2347a0c57e7f45d1dadb848a53 /target/ppc/kvm.c | |
parent | target-ppc: fix Book-E TLB matching (diff) | |
hw/ppc/spapr: Check for valid page size when hot plugging memory
On POWER, the valid page sizes that the guest can use are bound
to the CPU and not to the memory region. QEMU already has some
fancy logic to determine the maximum page size to advertise to
the guest during boot (see getrampagesize() in the file
target/ppc/kvm.c for more information).
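For context, getrampagesize() essentially asks the file system backing
the guest RAM for its page size: on hugetlbfs this is the huge page
size, otherwise it is the normal host page size. A stand-alone sketch
of that idea (not QEMU's actual code; the hugetlbfs magic number is
hard-coded here purely for illustration):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6   /* value from linux/magic.h */

/* Illustrative only: return the page size backing mem_path, falling
 * back to the base host page size when the path is not on hugetlbfs. */
static long backing_page_size(const char *mem_path)
{
    struct statfs fs;

    if (mem_path && statfs(mem_path, &fs) == 0 &&
        fs.f_type == HUGETLBFS_MAGIC) {
        return fs.f_bsize;      /* e.g. 16 MiB huge pages on POWER */
    }
    return getpagesize();       /* e.g. 64 KiB base pages */
}

int main(void)
{
    printf("backing page size: %ld\n", backing_page_size("/dev/hugepages"));
    return 0;
}
```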
However, once we're booted and the guest is already using huge
pages, it is currently still possible to hot-plug memory regions
that do not support huge pages - which of course does not work
on POWER, since the guest assumes that it can use huge pages
everywhere. The KVM_RUN ioctl then aborts with -EFAULT, QEMU
prints a not very helpful error message together with a register
dump, and the user is left wondering why the VM unexpectedly
died.
To avoid this situation, we should check the page size of
hot-plugged DIMMs to see whether it can be used in the current
VM. If it does not fit, we print a more helpful error message
and refuse to add the DIMM, so that the VM does not die
unexpectedly and the user gets a second chance to plug a DIMM
with a matching memory backend instead.
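The enforcement itself happens on the memory hot-plug path in
hw/ppc/spapr.c, which is outside the diffstat shown below. A hedged
sketch of that caller side; the helper name spapr_check_dimm_page_size,
the property lookup, and the error wording are all approximations for
illustration, not the actual upstream hunk:

```c
/* Hedged sketch of the caller side in hw/ppc/spapr.c; the function name
 * and error wording are made up for illustration, and the real hunk is
 * not part of this diffstat. */
static void spapr_check_dimm_page_size(PCDIMMDevice *dimm, Error **errp)
{
    /* the "memdev" property names the backend object backing the DIMM */
    char *mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
                                            NULL);

    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
        error_setg(errp, "Memory backend has an unsupported page size "
                   "(hot-plugged RAM must match the guest's page sizes)");
    }
    g_free(mem_dev);
}
```

With a check like this wired into the plug handler, device_add fails
cleanly instead of the guest dying later in KVM_RUN.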
Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1419466
Signed-off-by: Thomas Huth <thuth@redhat.com>
[dwg: Fix a build error on 32-bit builds with KVM]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'target/ppc/kvm.c')
-rw-r--r-- | target/ppc/kvm.c | 32 |
1 file changed, 28 insertions, 4 deletions
```diff
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 663d2e79c9..52bbea514a 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -438,12 +438,13 @@ static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
     return (1ul << shift) <= rampgsize;
 }
 
+static long max_cpu_page_size;
+
 static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 {
     static struct kvm_ppc_smmu_info smmu_info;
     static bool has_smmu_info;
     CPUPPCState *env = &cpu->env;
-    long rampagesize;
     int iq, ik, jq, jk;
     bool has_64k_pages = false;
 
@@ -458,7 +459,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         has_smmu_info = true;
     }
 
-    rampagesize = getrampagesize();
+    if (!max_cpu_page_size) {
+        max_cpu_page_size = getrampagesize();
+    }
 
     /* Convert to QEMU form */
     memset(&env->sps, 0, sizeof(env->sps));
@@ -478,14 +481,14 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
 
-        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+        if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                  ksps->page_shift)) {
             continue;
         }
         qsps->page_shift = ksps->page_shift;
         qsps->slb_enc = ksps->slb_enc;
         for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
-            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+            if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
                                      ksps->enc[jk].page_shift)) {
                 continue;
             }
@@ -510,12 +513,33 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         env->mmu_model &= ~POWERPC_MMU_64K;
     }
 }
+
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+    Object *mem_obj = object_resolve_path(obj_path, NULL);
+    char *mempath = object_property_get_str(mem_obj, "mem-path", NULL);
+    long pagesize;
+
+    if (mempath) {
+        pagesize = gethugepagesize(mempath);
+    } else {
+        pagesize = getpagesize();
+    }
+
+    return pagesize >= max_cpu_page_size;
+}
+
 #else /* defined (TARGET_PPC64) */
 
 static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 {
 }
 
+bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
+{
+    return true;
+}
+
 #endif /* !defined (TARGET_PPC64) */
 
 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
```
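One piece is necessarily missing from a diffstat limited to
target/ppc/kvm.c: callers such as hw/ppc/spapr.c need a prototype for
the new helper, and builds without KVM need a stub. Presumably that
lives in target/ppc/kvm_ppc.h, roughly along these lines (an assumption
based on how other kvmppc_* helpers are declared there, not part of the
diff above):

```c
/* Assumed shape of the declaration in target/ppc/kvm_ppc.h; this header
 * is not in the diffstat above, so treat this block as an illustration. */
#ifdef CONFIG_KVM
bool kvmppc_is_mem_backend_page_size_ok(char *obj_path);
#else
/* Without KVM there is no host page-size restriction to enforce. */
static inline bool kvmppc_is_mem_backend_page_size_ok(char *obj_path)
{
    return true;
}
#endif
```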