author     Sam Bobroff      2017-08-18 07:50:22 +0200
committer  David Gibson     2017-09-08 01:30:55 +0200
commit     fa98fbfcdfcb980b4a690b8bc93ab597935087b1
tree       df06da704c52b1e04a645f9104dd03e00ad97b64
parent     spapr: fallback to raw mode if best compat mode cannot be set during CAS
PPC: KVM: Support machine option to set VSMT mode
KVM now allows writing to KVM_CAP_PPC_SMT which has previously been
read only. Doing so causes KVM to act, for that VM, as if the host's
SMT mode was the given value. This is particularly important on Power
9 systems because their default value is 1, but they are able to
support values up to 8.
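As an illustration of what "writing" this capability involves at the raw KVM
ioctl level, the sketch below sets a virtual SMT mode on a VM file descriptor.
It is not part of the patch: the vm_fd/set_vsmt_mode names are assumptions made
for the example, and QEMU itself goes through its kvm_vm_enable_cap() wrapper
(see kvmppc_set_smt_threads() in the diff below).

    /* Illustrative sketch only, not from the patch: drive the now-writable
     * KVM_CAP_PPC_SMT directly.  vm_fd is assumed to come from a prior
     * KVM_CREATE_VM ioctl on /dev/kvm. */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_vsmt_mode(int vm_fd, unsigned long vsmt)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_PPC_SMT,   /* previously read-only, now writable */
            .args = { vsmt },         /* desired virtual SMT mode, e.g. 4 or 8 */
        };

        /* Returns 0 on success, or -1 with errno set on failure. */
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }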
This patch introduces a way to control this capability via a new
machine property called VSMT ("Virtual SMT"). If the value is not set
on the command line, a default is chosen that is, when possible,
compatible with legacy systems.
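For example, on a pseries guest with 8 threads per core, the mode could be
requested explicitly with an invocation along the lines of:

    qemu-system-ppc64 -machine pseries,vsmt=8 -smp 8,threads=8 ...

The property name and type ("vsmt", uint32) come from the spapr code in the
diff below; the surrounding options here are illustrative only. Per the checks
in spapr_set_vsmt_mode(), threads/core must be a power of 2 and vsmt must be at
least threads/core.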
Note that the initialization of KVM_CAP_PPC_SMT has changed slightly
because, in KVM, it has changed from a global capability to a
VM-specific one. This won't cause a problem on older KVMs because VM
capability checks fall back to global ones.
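The fallback works because, on kernels that do not accept KVM_CHECK_EXTENSION
on the VM fd, QEMU's wrapper retries the query on the global /dev/kvm fd. The
sketch below illustrates that pattern in isolation; the fd arguments and the
function name are assumptions for the example, and QEMU's own helper for this
is kvm_vm_check_extension(), which the patch now uses for KVM_CAP_PPC_SMT.

    /* Illustrative sketch only: query a capability on the VM fd first and
     * fall back to the global /dev/kvm query when the kernel is too old to
     * answer VM-level KVM_CHECK_EXTENSION. */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int check_cap_vm_or_global(int kvm_fd, int vm_fd, long cap)
    {
        int ret = ioctl(vm_fd, KVM_CHECK_EXTENSION, cap);  /* per-VM answer */

        if (ret < 0) {
            /* Older kernels only accept KVM_CHECK_EXTENSION on /dev/kvm. */
            ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
        }
        return ret < 0 ? 0 : ret;
    }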
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
-rw-r--r--   hw/ppc/spapr.c               |  75
-rw-r--r--   include/hw/ppc/spapr.h       |   1
-rw-r--r--   target/ppc/kvm.c             |  39
-rw-r--r--   target/ppc/kvm_ppc.h         |  12
-rw-r--r--   target/ppc/translate_init.c  |  14
5 files changed, 126 insertions(+), 15 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 067f571416..caffa12763 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -26,6 +26,7 @@
  */
 #include "qemu/osdep.h"
 #include "qapi/error.h"
+#include "qapi/visitor.h"
 #include "sysemu/sysemu.h"
 #include "sysemu/numa.h"
 #include "hw/hw.h"
@@ -2165,6 +2166,61 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
     g_free(type);
 }
 
+static void spapr_set_vsmt_mode(sPAPRMachineState *spapr, Error **errp)
+{
+    Error *local_err = NULL;
+    bool vsmt_user = !!spapr->vsmt;
+    int kvm_smt = kvmppc_smt_threads();
+    int ret;
+
+    if (!kvm_enabled() && (smp_threads > 1)) {
+        error_setg(&local_err, "TCG cannot support more than 1 thread/core "
+                   "on a pseries machine");
+        goto out;
+    }
+    if (!is_power_of_2(smp_threads)) {
+        error_setg(&local_err, "Cannot support %d threads/core on a pseries "
+                   "machine because it must be a power of 2", smp_threads);
+        goto out;
+    }
+
+    /* Detemine the VSMT mode to use: */
+    if (vsmt_user) {
+        if (spapr->vsmt < smp_threads) {
+            error_setg(&local_err, "Cannot support VSMT mode %d"
+                       " because it must be >= threads/core (%d)",
+                       spapr->vsmt, smp_threads);
+            goto out;
+        }
+        /* In this case, spapr->vsmt has been set by the command line */
+    } else {
+        /* Choose a VSMT mode that may be higher than necessary but is
+         * likely to be compatible with hosts that don't have VSMT. */
+        spapr->vsmt = MAX(kvm_smt, smp_threads);
+    }
+
+    /* KVM: If necessary, set the SMT mode: */
+    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
+        ret = kvmppc_set_smt_threads(spapr->vsmt);
+        if (ret) {
+            error_setg(&local_err,
+                       "Failed to set KVM's VSMT mode to %d (errno %d)",
+                       spapr->vsmt, ret);
+            if (!vsmt_user) {
+                error_append_hint(&local_err, "On PPC, a VM with %d threads/"
+                                  "core on a host with %d threads/core requires "
+                                  " the use of VSMT mode %d.\n",
+                                  smp_threads, kvm_smt, spapr->vsmt);
+            }
+            kvmppc_hint_smt_possible(&local_err);
+            goto out;
+        }
+    }
+    /* else TCG: nothing to do currently */
+out:
+    error_propagate(errp, local_err);
+}
+
 /* pSeries LPAR / sPAPR hardware init */
 static void ppc_spapr_init(MachineState *machine)
 {
@@ -2297,6 +2353,8 @@ static void ppc_spapr_init(MachineState *machine)
 
     spapr_cpu_parse_features(spapr);
 
+    spapr_set_vsmt_mode(spapr, &error_fatal);
+
     spapr_init_cpus(spapr);
 
     if (kvm_enabled()) {
@@ -2681,6 +2739,18 @@ static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
     }
 }
 
+static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
+                           void *opaque, Error **errp)
+{
+    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
+}
+
+static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
+                           void *opaque, Error **errp)
+{
+    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
+}
+
 static void spapr_machine_initfn(Object *obj)
 {
     sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
@@ -2711,6 +2781,11 @@ static void spapr_machine_initfn(Object *obj)
     object_property_set_description(obj, "resize-hpt",
                                     "Resizing of the Hash Page Table (enabled, disabled, required)",
                                     NULL);
+    object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
+                        spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
+    object_property_set_description(obj, "vsmt",
+                                    "Virtual SMT: KVM behaves as if this were"
+                                    " the host's SMT mode", &error_abort);
 }
 
 static void spapr_machine_finalizefn(Object *obj)
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 91617e3277..c1b365f564 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -99,6 +99,7 @@ struct sPAPRMachineState {
     uint64_t rtc_offset; /* Now used only during incoming migration */
     struct PPCTimebase tb;
     bool has_graphics;
+    uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
 
     Notifier epow_notifier;
     QTAILQ_HEAD(, sPAPREventLogEntry) pending_events;
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index f1d54106ac..b6bd0fa7a6 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -74,6 +74,7 @@ static int cap_interrupt_level = false;
 static int cap_segstate;
 static int cap_booke_sregs;
 static int cap_ppc_smt;
+static int cap_ppc_smt_possible;
 static int cap_ppc_rma;
 static int cap_spapr_tce;
 static int cap_spapr_tce_64;
@@ -130,7 +131,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
-    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
+    cap_ppc_smt_possible = kvm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
     cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
@@ -144,6 +145,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
      * only activated after this by kvmppc_set_papr() */
     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
     cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
+    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
     cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
     cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
     cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
@@ -2134,6 +2136,41 @@ int kvmppc_smt_threads(void)
     return cap_ppc_smt ? cap_ppc_smt : 1;
 }
 
+int kvmppc_set_smt_threads(int smt)
+{
+    int ret;
+
+    ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
+    if (!ret) {
+        cap_ppc_smt = smt;
+    }
+    return ret;
+}
+
+void kvmppc_hint_smt_possible(Error **errp)
+{
+    int i;
+    GString *g;
+    char *s;
+
+    assert(kvm_enabled());
+    if (cap_ppc_smt_possible) {
+        g = g_string_new("Available VSMT modes:");
+        for (i = 63; i >= 0; i--) {
+            if ((1UL << i) & cap_ppc_smt_possible) {
+                g_string_append_printf(g, " %lu", (1UL << i));
+            }
+        }
+        s = g_string_free(g, false);
+        error_append_hint(errp, "%s.\n", s);
+        g_free(s);
+    } else {
+        error_append_hint(errp,
+                          "This KVM seems to be too old to support VSMT.\n");
+    }
+}
+
+
 #ifdef TARGET_PPC64
 off_t kvmppc_alloc_rma(void **rma)
 {
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 381afe6240..e6711fc2b7 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -29,6 +29,8 @@ void kvmppc_set_papr(PowerPCCPU *cpu);
 int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
 void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
 int kvmppc_smt_threads(void);
+void kvmppc_hint_smt_possible(Error **errp);
+int kvmppc_set_smt_threads(int smt);
 int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
 int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
 int kvmppc_set_tcr(PowerPCCPU *cpu);
@@ -148,6 +150,16 @@ static inline int kvmppc_smt_threads(void)
     return 1;
 }
 
+static inline void kvmppc_hint_smt_possible(Error **errp)
+{
+    return;
+}
+
+static inline int kvmppc_set_smt_threads(int smt)
+{
+    return 0;
+}
+
 static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
 {
     return 0;
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 08ef74f064..703ea12f2a 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -9907,20 +9907,6 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
     int max_smt = kvmppc_smt_threads();
 #endif
 
-#if !defined(CONFIG_USER_ONLY)
-    if (smp_threads > max_smt) {
-        error_setg(errp, "Cannot support more than %d threads on PPC with %s",
-                   max_smt, kvm_enabled() ? "KVM" : "TCG");
-        return;
-    }
-    if (!is_power_of_2(smp_threads)) {
-        error_setg(errp, "Cannot support %d threads on PPC with %s, "
-                   "threads count must be a power of 2.",
-                   smp_threads, kvm_enabled() ? "KVM" : "TCG");
-        return;
-    }
-#endif
-
     cpu_exec_realizefn(cs, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);