-rwxr-xr-x  configure                      |   6
-rw-r--r--  hw/ppc/spapr.c                 | 101
-rw-r--r--  hw/ppc/spapr_drc.c             |  37
-rw-r--r--  hw/ppc/spapr_pci.c             |  47
-rw-r--r--  hw/vfio/common.c               |   6
-rw-r--r--  hw/vfio/pci.c                  |  19
-rw-r--r--  hw/vfio/platform.c             |   1
-rw-r--r--  include/hw/ppc/spapr.h         |   7
-rw-r--r--  include/hw/ppc/spapr_drc.h     |   3
-rw-r--r--  include/hw/ppc/spapr_ovec.h    |   1
-rw-r--r--  include/hw/vfio/vfio-common.h  |   1
-rw-r--r--  include/migration/misc.h       |   1
-rw-r--r--  include/migration/register.h   |   6
-rw-r--r--  migration/block.c              |   4
-rw-r--r--  migration/colo.c               |   2
-rw-r--r--  migration/migration.c          |  27
-rw-r--r--  migration/ram.c                |  63
-rw-r--r--  migration/ram.h                |   6
-rw-r--r--  migration/savevm.c             |  61
-rw-r--r--  migration/savevm.h             |   3
-rw-r--r--  migration/trace-events         |   4
-rw-r--r--  qemu-options.hx                |   6
-rw-r--r--  target/ppc/excp_helper.c       |   1
-rw-r--r--  target/ppc/kvm.c               |   5
-rw-r--r--  target/ppc/mmu-radix64.c       |  67
-rw-r--r--  target/ppc/mmu-radix64.h       |   1
-rw-r--r--  target/ppc/mmu_helper.c        |   3
-rw-r--r--  vl.c                           |  26
28 files changed, 309 insertions(+), 206 deletions(-)
diff --git a/configure b/configure
index 806658c98b..5096cbcf14 100755
--- a/configure
+++ b/configure
@@ -202,9 +202,9 @@ supported_kvm_target() {
supported_xen_target() {
test "$xen" = "yes" || return 1
glob "$1" "*-softmmu" || return 1
- case "${1%-softmmu}:$cpu" in
- arm:arm | aarch64:aarch64 | \
- i386:i386 | i386:x86_64 | x86_64:i386 | x86_64:x86_64)
+ # Only i386 and x86_64 provide the xenpv machine.
+ case "${1%-softmmu}" in
+ i386|x86_64)
return 0
;;
esac
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 0ee9fac50b..d38563d9a4 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -778,6 +778,11 @@ static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
}
}
+ /* /interrupt controller */
+ if (!spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)) {
+ spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
+ }
+
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0) {
offset = fdt_add_subnode(fdt, 0, "chosen");
@@ -801,7 +806,7 @@ int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
size -= sizeof(hdr);
- /* Create sceleton */
+ /* Create skeleton */
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
@@ -910,7 +915,8 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
- char val[2 * 3] = {
+ char val[2 * 4] = {
+ 23, 0x00, /* Xive mode: 0 = legacy (as in ISA 2.7), 1 = Exploitation */
24, 0x00, /* Hash/Radix, filled in below. */
25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
26, 0x40, /* Radix options: GTSE == yes. */
@@ -918,19 +924,19 @@ static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
if (kvm_enabled()) {
if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
- val[1] = 0x80; /* OV5_MMU_BOTH */
+ val[3] = 0x80; /* OV5_MMU_BOTH */
} else if (kvmppc_has_cap_mmu_radix()) {
- val[1] = 0x40; /* OV5_MMU_RADIX_300 */
+ val[3] = 0x40; /* OV5_MMU_RADIX_300 */
} else {
- val[1] = 0x00; /* Hash */
+ val[3] = 0x00; /* Hash */
}
} else {
if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
/* V3 MMU supports both hash and radix (with dynamic switching) */
- val[1] = 0xC0;
+ val[3] = 0xC0;
} else {
/* Otherwise we can only do hash */
- val[1] = 0x00;
+ val[3] = 0x00;
}
}
_FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
@@ -1068,9 +1074,6 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
- /* /interrupt controller */
- spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
-
ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
error_report("couldn't setup memory nodes in fdt");
@@ -1945,7 +1948,7 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void htab_cleanup(void *opaque)
+static void htab_save_cleanup(void *opaque)
{
sPAPRMachineState *spapr = opaque;
@@ -1953,10 +1956,10 @@ static void htab_cleanup(void *opaque)
}
static SaveVMHandlers savevm_htab_handlers = {
- .save_live_setup = htab_save_setup,
+ .save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
- .cleanup = htab_cleanup,
+ .save_cleanup = htab_save_cleanup,
.load_state = htab_load,
};
@@ -1967,24 +1970,6 @@ static void spapr_boot_set(void *opaque, const char *boot_device,
machine->boot_order = g_strdup(boot_device);
}
-/*
- * Reset routine for LMB DR devices.
- *
- * Unlike PCI DR devices, LMB DR devices explicitly register this reset
- * routine. Reset for PCI DR devices will be handled by PHB reset routine
- * when it walks all its children devices. LMB devices reset occurs
- * as part of spapr_ppc_reset().
- */
-static void spapr_drc_reset(void *opaque)
-{
- sPAPRDRConnector *drc = opaque;
- DeviceState *d = DEVICE(drc);
-
- if (d) {
- device_reset(d);
- }
-}
-
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
@@ -1993,13 +1978,11 @@ static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
int i;
for (i = 0; i < nr_lmbs; i++) {
- sPAPRDRConnector *drc;
uint64_t addr;
addr = i * lmb_size + spapr->hotplug_memory.base;
- drc = spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
- addr/lmb_size);
- qemu_register_reset(spapr_drc_reset, drc);
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
+ addr / lmb_size);
}
}
@@ -2093,11 +2076,8 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
int core_id = i * smp_threads;
if (mc->has_hotpluggable_cpus) {
- sPAPRDRConnector *drc =
- spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
- (core_id / smp_threads) * smt);
-
- qemu_register_reset(spapr_drc_reset, drc);
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
+ (core_id / smp_threads) * smt);
}
if (i < boot_cores_nr) {
@@ -2624,6 +2604,7 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
int i, fdt_offset, fdt_size;
void *fdt;
uint64_t addr = addr_start;
+ Error *local_err = NULL;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
@@ -2634,7 +2615,18 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
fdt_offset = spapr_populate_memory_node(fdt, node, addr,
SPAPR_MEMORY_BLOCK_SIZE);
- spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
+ spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
+ if (local_err) {
+ while (addr > addr_start) {
+ addr -= SPAPR_MEMORY_BLOCK_SIZE;
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
+ spapr_drc_detach(drc, dev, NULL);
+ }
+ g_free(fdt);
+ error_propagate(errp, local_err);
+ return;
+ }
addr += SPAPR_MEMORY_BLOCK_SIZE;
}
/* send hotplug notification to the
@@ -2674,14 +2666,20 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
addr = object_property_get_uint(OBJECT(dimm),
PC_DIMM_ADDR_PROP, &local_err);
if (local_err) {
- pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
- goto out;
+ goto out_unplug;
}
spapr_add_lmbs(dev, addr, size, node,
spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
- &error_abort);
+ &local_err);
+ if (local_err) {
+ goto out_unplug;
+ }
+
+ return;
+out_unplug:
+ pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
error_propagate(errp, local_err);
}
@@ -2863,8 +2861,8 @@ out:
error_propagate(errp, local_err);
}
-void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
- sPAPRMachineState *spapr)
+static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
+ sPAPRMachineState *spapr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
@@ -2979,17 +2977,10 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
g_assert(drc || !mc->has_hotpluggable_cpus);
- /*
- * Setup CPU DT entries only for hotplugged CPUs. For boot time or
- * coldplugged CPUs DT entries are setup in spapr_build_fdt().
- */
- if (dev->hotplugged) {
- fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
- }
+ fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
if (drc) {
- spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged,
- &local_err);
+ spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
if (local_err) {
g_free(fdt);
error_propagate(errp, local_err);
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index bd40b84cfc..f34355dad1 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -340,7 +340,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
}
void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
- int fdt_start_offset, bool coldplug, Error **errp)
+ int fdt_start_offset, Error **errp)
{
trace_spapr_drc_attach(spapr_drc_index(drc));
@@ -351,14 +351,11 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
}
- g_assert(fdt || coldplug);
-
- drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
+ g_assert(fdt);
drc->dev = d;
drc->fdt = fdt;
drc->fdt_start_offset = fdt_start_offset;
- drc->configured = coldplug;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->awaiting_allocation = true;
@@ -372,24 +369,9 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
static void spapr_drc_release(sPAPRDRConnector *drc)
{
- drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- /* Calling release callbacks based on spapr_drc_type(drc). */
- switch (spapr_drc_type(drc)) {
- case SPAPR_DR_CONNECTOR_TYPE_CPU:
- spapr_core_release(drc->dev);
- break;
- case SPAPR_DR_CONNECTOR_TYPE_PCI:
- spapr_phb_remove_pci_device_cb(drc->dev);
- break;
- case SPAPR_DR_CONNECTOR_TYPE_LMB:
- spapr_lmb_release(drc->dev);
- break;
- case SPAPR_DR_CONNECTOR_TYPE_PHB:
- case SPAPR_DR_CONNECTOR_TYPE_VIO:
- default:
- g_assert(false);
- }
+ drck->release(drc->dev);
drc->awaiting_release = false;
g_free(drc->fdt);
@@ -430,9 +412,9 @@ static bool release_pending(sPAPRDRConnector *drc)
return drc->awaiting_release;
}
-static void reset(DeviceState *d)
+static void drc_reset(void *opaque)
{
- sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(opaque);
trace_spapr_drc_reset(spapr_drc_index(drc));
@@ -454,12 +436,14 @@ static void reset(DeviceState *d)
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
}
+ drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
} else {
/* Otherwise device is absent, but might be hotplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
}
+ drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
}
}
@@ -540,6 +524,7 @@ static void realize(DeviceState *d, Error **errp)
g_free(child_name);
vmstate_register(DEVICE(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
+ qemu_register_reset(drc_reset, drc);
trace_spapr_drc_realize_complete(spapr_drc_index(drc));
}
@@ -598,7 +583,6 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
DeviceClass *dk = DEVICE_CLASS(k);
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
- dk->reset = reset;
dk->realize = realize;
dk->unrealize = unrealize;
drck->release_pending = release_pending;
@@ -633,6 +617,7 @@ static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_CPU;
drck->typename = "CPU";
drck->drc_name_prefix = "CPU ";
+ drck->release = spapr_core_release;
}
static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
@@ -642,6 +627,7 @@ static void spapr_drc_pci_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI;
drck->typename = "28";
drck->drc_name_prefix = "C";
+ drck->release = spapr_phb_remove_pci_device_cb;
}
static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
@@ -651,6 +637,7 @@ static void spapr_drc_lmb_class_init(ObjectClass *k, void *data)
drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB;
drck->typename = "MEM";
drck->drc_name_prefix = "LMB ";
+ drck->release = spapr_lmb_release;
}
static const TypeInfo spapr_dr_connector_info = {
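The net effect of the spapr_drc.c hunks above: spapr_drc_release() no longer switches on the connector type; each DRC subclass installs a release callback in its class_init, and the generic code dispatches through the class. A minimal stand-alone sketch of that dispatch pattern, in plain C rather than the real QOM machinery (every name below is illustrative, not the actual sPAPR types):

    /* Per-subclass release callback replacing a switch on connector type.
     * Plain-C stand-in for the QOM class/instance relationship. */
    #include <assert.h>
    #include <stdio.h>

    typedef struct DeviceState { const char *id; } DeviceState;

    typedef struct DRConnectorClass {
        const char *typename;
        void (*release)(DeviceState *dev);   /* set once per subclass */
    } DRConnectorClass;

    typedef struct DRConnector {
        const DRConnectorClass *klass;       /* QOM: SPAPR_DR_CONNECTOR_GET_CLASS() */
        DeviceState *dev;
    } DRConnector;

    static void cpu_release(DeviceState *dev) { printf("release CPU %s\n", dev->id); }
    static void lmb_release(DeviceState *dev) { printf("release LMB %s\n", dev->id); }

    static const DRConnectorClass cpu_class = { "CPU", cpu_release };
    static const DRConnectorClass lmb_class = { "MEM", lmb_release };

    /* Generic release path: no type switch, just the class method. */
    static void drc_do_release(DRConnector *drc)
    {
        assert(drc->klass->release);
        drc->klass->release(drc->dev);
    }

    int main(void)
    {
        DeviceState cpu0 = { "cpu0" }, lmb3 = { "lmb3" };
        DRConnector a = { &cpu_class, &cpu0 }, b = { &lmb_class, &lmb3 };
        drc_do_release(&a);
        drc_do_release(&b);
        return 0;
    }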
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 3b37dcdc09..a52dcf8ec0 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1388,8 +1388,8 @@ static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
return spapr_drc_index(drc);
}
-static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
- DeviceState *plugged_dev, Error **errp)
+static void spapr_pci_plug(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
{
sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
@@ -1435,8 +1435,7 @@ static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
goto out;
}
- spapr_drc_attach(drc, DEVICE(pdev), fdt, fdt_start_offset,
- !plugged_dev->hotplugged, &local_err);
+ spapr_drc_attach(drc, DEVICE(pdev), fdt, fdt_start_offset, &local_err);
if (local_err) {
goto out;
}
@@ -1470,8 +1469,8 @@ out:
}
}
-static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
- DeviceState *plugged_dev, Error **errp)
+static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
{
sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
@@ -1486,6 +1485,7 @@ static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
}
g_assert(drc);
+ g_assert(drc->dev == plugged_dev);
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
if (!drck->release_pending(drc)) {
@@ -1745,7 +1745,8 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
/* DMA setup */
- if ((sphb->page_size_mask & qemu_getrampagesize()) == 0) {
+ if (((sphb->page_size_mask & qemu_getrampagesize()) == 0)
+ && kvm_enabled()) {
error_report("System page size 0x%lx is not enabled in page_size_mask "
"(0x%"PRIx64"). Performance may be slow",
qemu_getrampagesize(), sphb->page_size_mask);
@@ -1873,20 +1874,6 @@ static void spapr_pci_pre_save(void *opaque)
gpointer key, value;
int i;
- g_free(sphb->msi_devs);
- sphb->msi_devs = NULL;
- sphb->msi_devs_num = g_hash_table_size(sphb->msi);
- if (!sphb->msi_devs_num) {
- return;
- }
- sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
-
- g_hash_table_iter_init(&iter, sphb->msi);
- for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
- sphb->msi_devs[i].key = *(uint32_t *) key;
- sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
- }
-
if (sphb->pre_2_8_migration) {
sphb->mig_liobn = sphb->dma_liobn[0];
sphb->mig_mem_win_addr = sphb->mem_win_addr;
@@ -1900,6 +1887,20 @@ static void spapr_pci_pre_save(void *opaque)
sphb->mig_mem_win_size += sphb->mem64_win_size;
}
}
+
+ g_free(sphb->msi_devs);
+ sphb->msi_devs = NULL;
+ sphb->msi_devs_num = g_hash_table_size(sphb->msi);
+ if (!sphb->msi_devs_num) {
+ return;
+ }
+ sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
+
+ g_hash_table_iter_init(&iter, sphb->msi);
+ for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
+ sphb->msi_devs[i].key = *(uint32_t *) key;
+ sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
+ }
}
static int spapr_pci_post_load(void *opaque, int version_id)
@@ -1973,8 +1974,8 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
/* Supported by TYPE_SPAPR_MACHINE */
dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- hp->plug = spapr_phb_hot_plug_child;
- hp->unplug = spapr_phb_hot_unplug_child;
+ hp->plug = spapr_pci_plug;
+ hp->unplug_request = spapr_pci_unplug_request;
}
static const TypeInfo spapr_phb_info = {
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b9abe77f5a..29923e4990 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -858,13 +858,15 @@ void vfio_reset_handler(void *opaque)
QLIST_FOREACH(group, &vfio_group_list, next) {
QLIST_FOREACH(vbasedev, &group->device_list, next) {
- vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ if (vbasedev->dev->realized) {
+ vbasedev->ops->vfio_compute_needs_reset(vbasedev);
+ }
}
}
QLIST_FOREACH(group, &vfio_group_list, next) {
QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->needs_reset) {
+ if (vbasedev->dev->realized && vbasedev->needs_reset) {
vbasedev->ops->vfio_hot_reset_multi(vbasedev);
}
}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 8de8272e96..d4051cb951 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1743,6 +1743,18 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
}
+ /*
+ * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
+ * (Niantic errata #35) causing Windows to error with a Code 10 for the
+ * device on Q35. Fixup any such devices to report version 1. If we
+ * were to remove the capability entirely the guest would lose extended
+ * config space.
+ */
+ if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ 1, PCI_EXP_FLAGS_VERS);
+ }
+
pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
errp);
if (pos < 0) {
@@ -2116,7 +2128,8 @@ static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
/* Prep dependent devices for reset and clear our marker. */
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
continue;
}
tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
@@ -2197,7 +2210,8 @@ out:
}
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
continue;
}
tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
@@ -2647,6 +2661,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
vdev->vbasedev.ops = &vfio_pci_ops;
vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
+ vdev->vbasedev.dev = &vdev->pdev.qdev;
tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
len = readlink(tmp, group_path, sizeof(group_path));
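To illustrate the vfio_setup_pcie_cap() fixup above in isolation: when the capability flags word reports PCIe capability version 0, the version field is patched to 1 so the guest keeps extended config space. A self-contained sketch; PCI_EXP_FLAGS_VERS carries the value from Linux's pci_regs.h, and fixup_pcie_flags() is a hypothetical stand-in for the vfio_add_emulated_word() call:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_EXP_FLAGS_VERS 0x000f   /* PCIe capability version field */

    /* Report version 1 for devices that (incorrectly) report version 0. */
    static uint16_t fixup_pcie_flags(uint16_t flags)
    {
        if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
            flags = (flags & ~PCI_EXP_FLAGS_VERS) | 1;
        }
        return flags;
    }

    int main(void)
    {
        /* Broken 82599 VF style flags (version 0) get fixed up... */
        printf("0x%04x\n", (unsigned)fixup_pcie_flags(0x0000)); /* -> 0x0001 */
        /* ...while sane devices are left alone. */
        printf("0x%04x\n", (unsigned)fixup_pcie_flags(0x0002)); /* -> 0x0002 */
        return 0;
    }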
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index a4663c918e..7c09deda61 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -640,6 +640,7 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
int i, ret;
vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
+ vbasedev->dev = dev;
vbasedev->ops = &vfio_platform_ops;
trace_vfio_platform_realize(vbasedev->sysfsdev ?
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index a66bbac352..a184ffab0e 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -377,9 +377,8 @@ struct sPAPRMachineState {
* as well.
*
* We also need some hcalls which are specific to qemu / KVM-on-POWER.
- * So far we just need one for H_RTAS, but in future we'll need more
- * for extensions like virtio. We put those into the 0xf000-0xfffc
- * range which is reserved by PAPR for "platform-specific" hcalls.
+ * We put those into the 0xf000-0xfffc range which is reserved by PAPR
+ * for "platform-specific" hcalls.
*/
#define KVMPPC_HCALL_BASE 0xf000
#define KVMPPC_H_RTAS (KVMPPC_HCALL_BASE + 0x0)
@@ -640,8 +639,6 @@ void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
uint32_t count, uint32_t index);
void spapr_cpu_parse_features(sPAPRMachineState *spapr);
-void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
- sPAPRMachineState *spapr);
/* CPU and LMB DRC release callbacks. */
void spapr_core_release(DeviceState *dev);
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h
index d9cacb368f..d15e9eb3b4 100644
--- a/include/hw/ppc/spapr_drc.h
+++ b/include/hw/ppc/spapr_drc.h
@@ -217,6 +217,7 @@ typedef struct sPAPRDRConnectorClass {
sPAPRDREntitySense (*dr_entity_sense)(sPAPRDRConnector *drc);
uint32_t (*isolate)(sPAPRDRConnector *drc);
uint32_t (*unisolate)(sPAPRDRConnector *drc);
+ void (*release)(DeviceState *dev);
/* QEMU interfaces for managing hotplug operations */
bool (*release_pending)(sPAPRDRConnector *drc);
@@ -233,7 +234,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
uint32_t drc_type_mask);
void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
- int fdt_start_offset, bool coldplug, Error **errp);
+ int fdt_start_offset, Error **errp);
void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp);
#endif /* HW_SPAPR_DRC_H */
diff --git a/include/hw/ppc/spapr_ovec.h b/include/hw/ppc/spapr_ovec.h
index f088833204..0b464e22e7 100644
--- a/include/hw/ppc/spapr_ovec.h
+++ b/include/hw/ppc/spapr_ovec.h
@@ -50,6 +50,7 @@ typedef struct sPAPROptionVector sPAPROptionVector;
#define OV5_DRCONF_MEMORY OV_BIT(2, 2)
#define OV5_FORM1_AFFINITY OV_BIT(5, 0)
#define OV5_HP_EVT OV_BIT(6, 5)
+#define OV5_XIVE_EXPLOIT OV_BIT(23, 7)
/* ISA 3.00 MMU features: */
#define OV5_MMU_BOTH OV_BIT(24, 0) /* Radix and hash */
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 9521013d52..0b475a3596 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -115,6 +115,7 @@ typedef struct VFIODevice {
struct VFIOGroup *group;
char *sysfsdev;
char *name;
+ DeviceState *dev;
int fd;
int type;
bool reset_works;
diff --git a/include/migration/misc.h b/include/migration/misc.h
index 22551216bb..c079b7771b 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -53,7 +53,6 @@ bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
/* ...and after the device transmission */
bool migration_in_postcopy_after_devices(MigrationState *);
-void migration_only_migratable_set(void);
void migration_global_dump(Monitor *mon);
#endif
diff --git a/include/migration/register.h b/include/migration/register.h
index d9498d95eb..a0f1edd8c7 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -18,7 +18,7 @@ typedef struct SaveVMHandlers {
/* This runs inside the iothread lock. */
SaveStateHandler *save_state;
- void (*cleanup)(void *opaque);
+ void (*save_cleanup)(void *opaque);
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
@@ -33,12 +33,14 @@ typedef struct SaveVMHandlers {
int (*save_live_iterate)(QEMUFile *f, void *opaque);
/* This runs outside the iothread lock! */
- int (*save_live_setup)(QEMUFile *f, void *opaque);
+ int (*save_setup)(QEMUFile *f, void *opaque);
void (*save_live_pending)(QEMUFile *f, void *opaque,
uint64_t threshold_size,
uint64_t *non_postcopiable_pending,
uint64_t *postcopiable_pending);
LoadStateHandler *load_state;
+ int (*load_setup)(QEMUFile *f, void *opaque);
+ int (*load_cleanup)(void *opaque);
} SaveVMHandlers;
int register_savevm_live(DeviceState *dev,
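The register.h hunks rename the setup/cleanup hooks to make the save/load symmetry explicit and add an incoming-side pair. A stand-alone model of the resulting call order, with a simplified handler struct and hypothetical foo_* callbacks (the real SaveVMHandlers has more fields, as the block.c and ram.c hunks below show):

    #include <stdio.h>

    typedef struct Handlers {               /* simplified SaveVMHandlers */
        int  (*save_setup)(void *opaque);   /* was save_live_setup */
        int  (*save_live_iterate)(void *opaque);
        void (*save_cleanup)(void *opaque); /* was cleanup */
        int  (*load_setup)(void *opaque);   /* new: incoming-side init */
        int  (*load_cleanup)(void *opaque); /* new: incoming-side teardown */
    } Handlers;

    static int  foo_save_setup(void *o)   { (void)o; puts("save_setup");   return 0; }
    static int  foo_save_iterate(void *o) { (void)o; puts("save_iterate"); return 0; }
    static void foo_save_cleanup(void *o) { (void)o; puts("save_cleanup"); }
    static int  foo_load_setup(void *o)   { (void)o; puts("load_setup");   return 0; }
    static int  foo_load_cleanup(void *o) { (void)o; puts("load_cleanup"); return 0; }

    static const Handlers foo = {
        foo_save_setup, foo_save_iterate, foo_save_cleanup,
        foo_load_setup, foo_load_cleanup,
    };

    int main(void)
    {
        foo.save_setup(NULL);        /* outgoing: qemu_savevm_state_setup() */
        foo.save_live_iterate(NULL);
        foo.save_cleanup(NULL);      /* outgoing: qemu_savevm_state_cleanup() */
        foo.load_setup(NULL);        /* incoming: qemu_loadvm_state_setup() */
        foo.load_cleanup(NULL);      /* incoming: qemu_loadvm_state_cleanup() */
        return 0;
    }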
diff --git a/migration/block.c b/migration/block.c
index 86c0b96cd1..9171f60028 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -1008,12 +1008,12 @@ static bool block_is_active(void *opaque)
}
static SaveVMHandlers savevm_block_handlers = {
- .save_live_setup = block_save_setup,
+ .save_setup = block_save_setup,
.save_live_iterate = block_save_iterate,
.save_live_complete_precopy = block_save_complete,
.save_live_pending = block_save_pending,
.load_state = block_load,
- .cleanup = block_migration_cleanup,
+ .save_cleanup = block_migration_cleanup,
.is_active = block_is_active,
};
diff --git a/migration/colo.c b/migration/colo.c
index c4ba4c328b..ef35f00c9a 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -350,7 +350,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
/* Disable block migration */
migrate_set_block_enabled(false, &local_err);
qemu_savevm_state_header(fb);
- qemu_savevm_state_begin(fb);
+ qemu_savevm_state_setup(fb);
qemu_mutex_lock_iothread();
qemu_savevm_state_complete_precopy(fb, false, false);
qemu_mutex_unlock_iothread();
diff --git a/migration/migration.c b/migration/migration.c
index 51ccd1a4c5..a0db40d364 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -128,11 +128,6 @@ MigrationState *migrate_get_current(void)
return current_migration;
}
-void migration_only_migratable_set(void)
-{
- migrate_get_current()->only_migratable = true;
-}
-
MigrationIncomingState *migration_incoming_get_current(void)
{
static bool once;
@@ -291,7 +286,6 @@ static void process_incoming_migration_bh(void *opaque)
} else {
runstate_set(global_state_get_runstate());
}
- migrate_decompress_threads_join();
/*
* This must happen after any state changes since as soon as an external
* observer sees this event they might start to prod at the VM assuming
@@ -354,12 +348,8 @@ static void process_incoming_migration_co(void *opaque)
migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
MIGRATION_STATUS_FAILED);
error_report("load of migration failed: %s", strerror(-ret));
- migrate_decompress_threads_join();
exit(EXIT_FAILURE);
}
-
- free_xbzrle_decoded_buf();
-
mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
qemu_bh_schedule(mis->bh);
}
@@ -368,7 +358,6 @@ void migration_fd_process_incoming(QEMUFile *f)
{
Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
- migrate_decompress_threads_create();
qemu_file_set_blocking(f, false);
qemu_coroutine_enter(co);
}
@@ -835,7 +824,6 @@ static void migrate_fd_cleanup(void *opaque)
}
qemu_mutex_lock_iothread();
- migrate_compress_threads_join();
qemu_fclose(s->to_dst_file);
s->to_dst_file = NULL;
}
@@ -1840,7 +1828,7 @@ static void *migration_thread(void *opaque)
qemu_savevm_send_postcopy_advise(s->to_dst_file);
}
- qemu_savevm_state_begin(s->to_dst_file);
+ qemu_savevm_state_setup(s->to_dst_file);
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
@@ -1998,7 +1986,6 @@ void migrate_fd_connect(MigrationState *s)
}
}
- migrate_compress_threads_create();
qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
QEMU_THREAD_JOINABLE);
s->migration_thread_running = true;
@@ -2057,12 +2044,12 @@ static void migration_instance_init(Object *obj)
static const TypeInfo migration_type = {
.name = TYPE_MIGRATION,
/*
- * NOTE: "migration" itself is not really a device. We used
- * TYPE_DEVICE here only to leverage some existing QDev features
- * like "-global" properties, and HW_COMPAT_* fields (which are
- * finally applied as global properties as well). If one day the
- * global property feature can be migrated from QDev to QObject in
- * general, then we can switch to QObject as well.
+ * NOTE: TYPE_MIGRATION is not really a device, as the object is
+ * not created using qdev_create(), it is not attached to the qdev
+ * device tree, and it is never realized.
+ *
+ * TODO: Make this TYPE_OBJECT once QOM provides something like
+ * TYPE_DEVICE's "-global" properties.
*/
.parent = TYPE_DEVICE,
.class_init = migration_class_init,
diff --git a/migration/ram.c b/migration/ram.c
index 0baa1e0d56..1b08296d1b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -85,11 +85,10 @@ static struct {
QemuMutex lock;
/* it will store a page full of zeros */
uint8_t *zero_target_page;
+ /* buffer used for XBZRLE decoding */
+ uint8_t *decoded_buf;
} XBZRLE;
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
static void XBZRLE_cache_lock(void)
{
if (migrate_use_xbzrle())
@@ -307,7 +306,7 @@ static inline void terminate_compression_threads(void)
}
}
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
{
int i, thread_count;
@@ -330,7 +329,7 @@ void migrate_compress_threads_join(void)
comp_param = NULL;
}
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
{
int i, thread_count;
@@ -1350,13 +1349,18 @@ uint64_t ram_bytes_total(void)
return total;
}
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
{
- g_free(xbzrle_decoded_buf);
- xbzrle_decoded_buf = NULL;
+ XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
-static void ram_migration_cleanup(void *opaque)
+static void xbzrle_load_cleanup(void)
+{
+ g_free(XBZRLE.decoded_buf);
+ XBZRLE.decoded_buf = NULL;
+}
+
+static void ram_save_cleanup(void *opaque)
{
RAMState **rsp = opaque;
RAMBlock *block;
@@ -1386,6 +1390,7 @@ static void ram_migration_cleanup(void *opaque)
}
XBZRLE_cache_unlock();
migration_page_queue_free(*rsp);
+ compress_threads_save_cleanup();
g_free(*rsp);
*rsp = NULL;
}
@@ -1919,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
rcu_read_unlock();
+ compress_threads_save_setup();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2078,11 +2084,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
int xh_flags;
uint8_t *loaded_data;
- if (!xbzrle_decoded_buf) {
- xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
- }
- loaded_data = xbzrle_decoded_buf;
-
/* extract RLE header */
xh_flags = qemu_get_byte(f);
xh_len = qemu_get_be16(f);
@@ -2096,7 +2097,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
error_report("Failed to load XBZRLE page - len overflow!");
return -1;
}
+ loaded_data = XBZRLE.decoded_buf;
/* load data and decode */
+ /* it can change loaded_data to point to an internal buffer */
qemu_get_buffer_in_place(f, &loaded_data, xh_len);
/* decode RLE */
@@ -2230,7 +2233,7 @@ static void wait_for_decompress_done(void)
qemu_mutex_unlock(&decomp_done_lock);
}
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
{
int i, thread_count;
@@ -2254,7 +2257,7 @@ void migrate_decompress_threads_create(void)
}
}
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
{
int i, thread_count;
@@ -2310,6 +2313,28 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
}
/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+ xbzrle_load_setup();
+ compress_threads_load_setup();
+ return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+ xbzrle_load_cleanup();
+ compress_threads_load_cleanup();
+ return 0;
+}
+
+/**
* ram_postcopy_incoming_init: allocate postcopy data structures
*
 * Returns 0 for success and negative if there was an error
@@ -2623,13 +2648,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
static SaveVMHandlers savevm_ram_handlers = {
- .save_live_setup = ram_save_setup,
+ .save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
.save_live_complete_postcopy = ram_save_complete,
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_save_cleanup,
+ .load_setup = ram_load_setup,
+ .load_cleanup = ram_load_cleanup,
};
void ram_mig_init(void)
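One concrete payoff visible in the ram.c hunks: the XBZRLE decode buffer stops being a lazily-allocated file-scope global and becomes part of the XBZRLE state, allocated by the new load_setup hook and freed by load_cleanup. A tiny stand-alone model of that lifecycle (plain malloc/free instead of g_malloc/g_free, and the page-size constant is made up):

    #include <stdint.h>
    #include <stdlib.h>

    #define TARGET_PAGE_SIZE 4096   /* stand-in for the real constant */

    static struct {
        uint8_t *decoded_buf;       /* buffer used for XBZRLE decoding */
    } XBZRLE;

    static void xbzrle_load_setup(void)
    {
        XBZRLE.decoded_buf = malloc(TARGET_PAGE_SIZE);
    }

    static void xbzrle_load_cleanup(void)
    {
        free(XBZRLE.decoded_buf);
        XBZRLE.decoded_buf = NULL;
    }

    int main(void)
    {
        xbzrle_load_setup();        /* incoming side, before any page loads */
        /* load_xbzrle() would decode into XBZRLE.decoded_buf here */
        xbzrle_load_cleanup();      /* after the migration stream is consumed */
        return 0;
    }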
diff --git a/migration/ram.h b/migration/ram.h
index 6272eb0007..c081fde86c 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -39,15 +39,9 @@ int64_t xbzrle_cache_resize(int64_t new_size);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_total(void);
-void migrate_compress_threads_create(void);
-void migrate_compress_threads_join(void);
-void migrate_decompress_threads_create(void);
-void migrate_decompress_threads_join(void);
-
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
-void free_xbzrle_decoded_buf(void);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
unsigned long pages);
void ram_postcopy_migrated_memory_release(MigrationState *ms);
diff --git a/migration/savevm.c b/migration/savevm.c
index be3f885119..fdd15fa0a7 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -596,7 +596,7 @@ int register_savevm_live(DeviceState *dev,
se->opaque = opaque;
se->vmsd = NULL;
/* if this is a savevm_live handler then set is_ram */
- if (ops->save_live_setup != NULL) {
+ if (ops->save_setup != NULL) {
se->is_ram = 1;
}
@@ -955,14 +955,14 @@ void qemu_savevm_state_header(QEMUFile *f)
}
}
-void qemu_savevm_state_begin(QEMUFile *f)
+void qemu_savevm_state_setup(QEMUFile *f)
{
SaveStateEntry *se;
int ret;
- trace_savevm_state_begin();
+ trace_savevm_state_setup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (!se->ops || !se->ops->save_live_setup) {
+ if (!se->ops || !se->ops->save_setup) {
continue;
}
if (se->ops && se->ops->is_active) {
@@ -972,7 +972,7 @@ void qemu_savevm_state_begin(QEMUFile *f)
}
save_section_header(f, se, QEMU_VM_SECTION_START);
- ret = se->ops->save_live_setup(f, se->opaque);
+ ret = se->ops->save_setup(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
@@ -1215,8 +1215,8 @@ void qemu_savevm_state_cleanup(void)
trace_savevm_state_cleanup();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (se->ops && se->ops->cleanup) {
- se->ops->cleanup(se->opaque);
+ if (se->ops && se->ops->save_cleanup) {
+ se->ops->save_cleanup(se->opaque);
}
}
}
@@ -1241,7 +1241,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
qemu_mutex_unlock_iothread();
qemu_savevm_state_header(f);
- qemu_savevm_state_begin(f);
+ qemu_savevm_state_setup(f);
qemu_mutex_lock_iothread();
while (qemu_file_get_error(f) == 0) {
@@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
* got a bad migration state).
*/
migration_incoming_state_destroy();
-
+ qemu_loadvm_state_cleanup();
return NULL;
}
@@ -1901,6 +1901,44 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
return 0;
}
+static int qemu_loadvm_state_setup(QEMUFile *f)
+{
+ SaveStateEntry *se;
+ int ret;
+
+ trace_loadvm_state_setup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (!se->ops || !se->ops->load_setup) {
+ continue;
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+
+ ret = se->ops->load_setup(f, se->opaque);
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ error_report("Load state of device %s failed", se->idstr);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+void qemu_loadvm_state_cleanup(void)
+{
+ SaveStateEntry *se;
+
+ trace_loadvm_state_cleanup();
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (se->ops && se->ops->load_cleanup) {
+ se->ops->load_cleanup(se->opaque);
+ }
+ }
+}
+
static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
{
uint8_t section_type;
@@ -1973,6 +2011,10 @@ int qemu_loadvm_state(QEMUFile *f)
return -ENOTSUP;
}
+ if (qemu_loadvm_state_setup(f) != 0) {
+ return -EINVAL;
+ }
+
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
@@ -2036,6 +2078,7 @@ int qemu_loadvm_state(QEMUFile *f)
}
}
+ qemu_loadvm_state_cleanup();
cpu_synchronize_all_post_init();
return ret;
diff --git a/migration/savevm.h b/migration/savevm.h
index 5a2ed1161d..295c4a1f2c 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -30,7 +30,7 @@
#define QEMU_VM_SECTION_FOOTER 0x7e
bool qemu_savevm_state_blocked(Error **errp);
-void qemu_savevm_state_begin(QEMUFile *f);
+void qemu_savevm_state_setup(QEMUFile *f);
void qemu_savevm_state_header(QEMUFile *f);
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
void qemu_savevm_state_cleanup(void);
@@ -53,5 +53,6 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
uint64_t *length_list);
int qemu_loadvm_state(QEMUFile *f);
+void qemu_loadvm_state_cleanup(void);
#endif
diff --git a/migration/trace-events b/migration/trace-events
index 38345be9c3..cb2c4b5b40 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
qemu_loadvm_state_post_main(int ret) "%d"
qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
qemu_savevm_send_packaged(void) ""
+loadvm_state_setup(void) ""
+loadvm_state_cleanup(void) ""
loadvm_handle_cmd_packaged(unsigned int length) "%u"
loadvm_handle_cmd_packaged_main(int ret) "%d"
loadvm_handle_cmd_packaged_received(int ret) "%d"
@@ -32,7 +34,7 @@ savevm_send_open_return_path(void) ""
savevm_send_ping(uint32_t val) "%x"
savevm_send_postcopy_listen(void) ""
savevm_send_postcopy_run(void) ""
-savevm_state_begin(void) ""
+savevm_state_setup(void) ""
savevm_state_header(void) ""
savevm_state_iterate(void) ""
savevm_state_cleanup(void) ""
diff --git a/qemu-options.hx b/qemu-options.hx
index ddab656eb3..76b1c67737 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -85,6 +85,12 @@ Enables or disables NVDIMM support. The default is off.
@item s390-squash-mcss=on|off
Enables or disables squashing subchannels into the default css.
The default is off.
+@item enforce-config-section=on|off
+If @option{enforce-config-section} is set to @var{on}, force the migration
+code to send the configuration section even if the machine type sets the
+@option{migration.send-configuration} property to @var{off}.
+NOTE: this parameter is deprecated. Please use @option{-global}
+@option{migration.send-configuration}=@var{on|off} instead.
@end table
ETEXI
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 3a9f0861e7..e6009e70e5 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -283,6 +283,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
* precise in the MSR.
*/
msr |= 0x00100000;
+ env->spr[SPR_BOOKE_ESR] = ESR_FP;
break;
case POWERPC_EXCP_INVAL:
LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index f2f7c531bc..f7a7ea5858 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2445,6 +2445,7 @@ static int kvm_ppc_register_host_cpu_type(void)
.class_init = kvmppc_host_cpu_class_init,
};
PowerPCCPUClass *pvr_pcc;
+ ObjectClass *oc;
DeviceClass *dc;
int i;
@@ -2455,6 +2456,9 @@ static int kvm_ppc_register_host_cpu_type(void)
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
type_register(&type_info);
+ oc = object_class_by_name(type_info.name);
+ g_assert(oc);
+
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
@@ -2474,7 +2478,6 @@ static int kvm_ppc_register_host_cpu_type(void)
dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
if (strcmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
- ObjectClass *oc = OBJECT_CLASS(pvr_pcc);
char *suffix;
ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 69fde65276..bbd37e3c7d 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -147,11 +147,10 @@ static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
}
}
-static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
+static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
uint64_t base_addr, uint64_t nls,
hwaddr *raddr, int *psize,
- int *fault_cause, int *prot,
- hwaddr *pte_addr)
+ int *fault_cause, hwaddr *pte_addr)
{
CPUState *cs = CPU(cpu);
uint64_t index, pde;
@@ -177,10 +176,6 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
uint64_t rpn = pde & R_PTE_RPN;
uint64_t mask = (1UL << *psize) - 1;
- if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) {
- return 0; /* Protection Denied Access */
- }
-
/* Or high bits of rpn and low bits to ea to form whole real addr */
*raddr = (rpn & ~mask) | (eaddr & mask);
*pte_addr = base_addr + (index * sizeof(pde));
@@ -188,9 +183,8 @@ static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
}
/* Next Level of Radix Tree */
- return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB,
- pde & R_PDE_NLS, raddr, psize,
- fault_cause, prot, pte_addr);
+ return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
+ raddr, psize, fault_cause, pte_addr);
}
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
@@ -241,11 +235,11 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
/* Walk Radix Tree from Process Table Entry to Convert EA to RA */
page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK,
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &prot,
- &pte_addr);
- if (!pte) {
+ &raddr, &page_size, &fault_cause, &pte_addr);
+ if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
+ /* Couldn't get pte or access denied due to protection */
ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
return 1;
}
@@ -257,3 +251,48 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
prot, mmu_idx, 1UL << page_size);
return 0;
}
+
+hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ hwaddr raddr, pte_addr;
+ uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
+ int page_size, fault_cause = 0;
+
+ /* Handle Real Mode */
+ if (msr_dr == 0) {
+ /* In real mode top 4 effective addr bits (mostly) ignored */
+ return eaddr & 0x0FFFFFFFFFFFFFFFULL;
+ }
+
+ /* Virtual Mode Access - get the fully qualified address */
+ if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
+ return -1;
+ }
+
+ /* Get Process Table */
+ patbe = vhc->get_patbe(cpu->vhyp);
+
+ /* Index Process Table by PID to Find Corresponding Process Table Entry */
+ offset = pid * sizeof(struct prtb_entry);
+ size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
+ if (offset >= size) {
+ /* offset exceeds size of the process table */
+ return -1;
+ }
+ prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);
+
+ /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+ page_size = PRTBE_R_GET_RTS(prtbe0);
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
+ prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
+ &raddr, &page_size, &fault_cause, &pte_addr);
+ if (!pte) {
+ return -1;
+ }
+
+ return raddr & TARGET_PAGE_MASK;
+}
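Two related moves in mmu-radix64.c: the protection check leaves ppc_radix64_walk_tree() for the fault path (so the new ppc_radix64_get_phys_page_debug() can reuse the walk without raising protection faults), and the walk's leaf step simply combines the PTE's real page number with the low effective-address bits. That leaf arithmetic, as a self-contained example with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t eaddr = 0x0000000012345678ULL; /* effective address */
        uint64_t rpn   = 0x00000000abc00000ULL; /* real page number bits from the PTE */
        int psize      = 16;                    /* remaining page-size bits (64K page) */

        /* Or high bits of rpn and low bits of ea to form the real address. */
        uint64_t mask  = (1ULL << psize) - 1;
        uint64_t raddr = (rpn & ~mask) | (eaddr & mask);

        printf("raddr = 0x%016llx\n", (unsigned long long)raddr); /* 0xabc05678 */
        return 0;
    }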
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
index 1d5c7cfea5..0ecf063a17 100644
--- a/target/ppc/mmu-radix64.h
+++ b/target/ppc/mmu-radix64.h
@@ -46,6 +46,7 @@
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx);
+hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
{
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 65d1c8692d..b7b9088842 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -30,6 +30,7 @@
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
@@ -1432,7 +1433,7 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return ppc_hash64_get_phys_page_debug(cpu, addr);
case POWERPC_MMU_VER_3_00:
if (ppc64_radix_guest(ppc_env_get_cpu(env))) {
- /* TODO - Unsupported */
+ return ppc_radix64_get_phys_page_debug(cpu, addr);
} else {
return ppc_hash64_get_phys_page_debug(cpu, addr);
}
diff --git a/vl.c b/vl.c
index d17c863409..f7560de622 100644
--- a/vl.c
+++ b/vl.c
@@ -3962,7 +3962,7 @@ int main(int argc, char **argv, char **envp)
*
* "-global migration.only-migratable=true"
*/
- migration_only_migratable_set();
+ qemu_global_option("migration.only-migratable=true");
break;
case QEMU_OPTION_nodefaults:
has_defaults = 0;
@@ -4418,6 +4418,18 @@ int main(int argc, char **argv, char **envp)
configure_accelerator(current_machine);
+ /*
+ * Register all the global properties, including accel properties,
+ * machine properties, and user-specified ones.
+ */
+ register_global_properties(current_machine);
+
+ /*
+ * Migration object can only be created after global properties
+ * are applied correctly.
+ */
+ migration_object_init();
+
if (qtest_chrdev) {
qtest_init(qtest_chrdev, qtest_log, &error_fatal);
}
@@ -4601,18 +4613,6 @@ int main(int argc, char **argv, char **envp)
exit (i == 1 ? 1 : 0);
}
- /*
- * Register all the global properties, including accel properties,
- * machine properties, and user-specified ones.
- */
- register_global_properties(current_machine);
-
- /*
- * Migration object can only be created after global properties
- * are applied correctly.
- */
- migration_object_init();
-
/* This checkpoint is required by replay to separate prior clock
reading from the other reads, because timer polling functions query
clock values from the log. */