author     Stefan Hajnoczi  2022-12-04 17:00:26 +0100
committer  Stefan Hajnoczi  2022-12-04 17:00:26 +0100
commit     42f3253c349ac2f03e73d2dab0c7456ff4f0c9fc
tree       4a5bafa84bb8238b6af420bc9cbbc01553d3f05c /hw
parent     Update VERSION for v7.2.0-rc3
parent     include/hw: VM state takes precedence in virtio_device_should_start
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio: regression fix

Fixes a regression with migration and vsock; fixing that exposes some
known issues in vhost-user cleanup, so this attempts to fix those as
well. More work on vhost-user is needed :)

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmOIWaEPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp+RQH/2PVAjD/GA3zF5F3Z07vH51c55T6tluZ85c3
# 4u66SSkF5JR1hATCujYCtrt9V0mnqhmhhm4gJH5xcsynFjjyIXd2dDrTFRpCtRgn
# icXOmYCc9pCu8XsluJnWvY/5r/KEDxqmGVE8Kyhz551QjvsBkezhI9x9vhJZJLCn
# Xn1XQ/3jpUcQLwasu8AxZb0IDW8WdCtonbke6xIyMzOYGR2bnRdXlDXVVG1zJ/SZ
# eS3HUad71VekhfzWq0fx8yEJnfvbes9vo007y8rOGdHOcMneWGAie52W1dOBhclh
# Zt56zID55t1USEwlPxkZSj7UXNbVl7Uz/XU5ElN0yTesttP4Iq0=
# =ZkaX
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 01 Dec 2022 02:37:05 EST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  include/hw: VM state takes precedence in virtio_device_should_start
  hw/virtio: generalise CHR_EVENT_CLOSED handling
  hw/virtio: add started_vu status field to vhost-user-gpio
  vhost: enable vrings in vhost_dev_start() for vhost-user devices
  tests/qtests: override "force-legacy" for gpio virtio-mmio tests

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
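The central API change in this pull is that vhost_dev_start() and vhost_dev_stop() now take a third boolean argument, vrings, which tells the core vhost code whether it should also enable or disable the vrings on behalf of the device. The stand-alone sketch below (stubbed types only, not QEMU's real headers) is merely an illustration of the new calling convention as seen by a device model:

/*
 * Illustration only: stubbed types stand in for QEMU's real
 * struct vhost_dev / VirtIODevice.  The point is the new boolean
 * "vrings" argument to vhost_dev_start()/vhost_dev_stop().
 */
#include <stdbool.h>
#include <stdio.h>

struct vhost_dev { bool started; };          /* stub */
struct VirtIODevice { const char *name; };   /* stub */

static int vhost_dev_start(struct vhost_dev *hdev, struct VirtIODevice *vdev,
                           bool vrings)
{
    /* The new third argument: also enable the vrings when requested. */
    hdev->started = true;
    printf("start %s, enable vrings: %d\n", vdev->name, vrings);
    return 0;
}

static void vhost_dev_stop(struct vhost_dev *hdev, struct VirtIODevice *vdev,
                           bool vrings)
{
    printf("stop %s, disable vrings: %d\n", vdev->name, vrings);
    hdev->started = false;
}

int main(void)
{
    struct vhost_dev dev = { false };
    struct VirtIODevice blk = { "vhost-user-blk" };

    /* vhost-user-blk, -fs, -i2c, -rng, vhost-scsi and vsock now pass true */
    vhost_dev_start(&dev, &blk, true);
    vhost_dev_stop(&dev, &blk, true);

    /* vhost_net and vhost-user-gpio pass false, as in the diff below. */
    return 0;
}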
Diffstat (limited to 'hw')
-rw-r--r--  hw/block/vhost-user-blk.c      | 45
-rw-r--r--  hw/net/vhost_net.c             |  8
-rw-r--r--  hw/scsi/vhost-scsi-common.c    |  4
-rw-r--r--  hw/virtio/trace-events         |  4
-rw-r--r--  hw/virtio/vhost-user-fs.c      |  4
-rw-r--r--  hw/virtio/vhost-user-gpio.c    | 26
-rw-r--r--  hw/virtio/vhost-user-i2c.c     |  4
-rw-r--r--  hw/virtio/vhost-user-rng.c     |  4
-rw-r--r--  hw/virtio/vhost-user.c         | 71
-rw-r--r--  hw/virtio/vhost-vsock-common.c |  4
-rw-r--r--  hw/virtio/vhost.c              | 44
11 files changed, 149 insertions(+), 69 deletions(-)
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 0d5190accf..aff4d2b8cb 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -178,7 +178,7 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
}
s->dev.vq_index_end = s->dev.nvqs;
- ret = vhost_dev_start(&s->dev, vdev);
+ ret = vhost_dev_start(&s->dev, vdev, true);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error starting vhost");
goto err_guest_notifiers;
@@ -213,7 +213,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&s->dev, vdev);
+ vhost_dev_stop(&s->dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
if (ret < 0) {
@@ -369,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_user_blk_stop(vdev);
vhost_dev_cleanup(&s->dev);
-}
-static void vhost_user_blk_chr_closed_bh(void *opaque)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBlk *s = VHOST_USER_BLK(vdev);
-
- vhost_user_blk_disconnect(dev);
+ /* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
- NULL, opaque, NULL, true);
+ NULL, dev, NULL, true);
}
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
@@ -398,33 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- if (!runstate_check(RUN_STATE_SHUTDOWN)) {
- /*
- * A close event may happen during a read/write, but vhost
- * code assumes the vhost_dev remains setup, so delay the
- * stop & clear.
- */
- AioContext *ctx = qemu_get_current_aio_context();
-
- qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
- NULL, NULL, false);
- aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
-
- /*
- * Move vhost device to the stopped state. The vhost-user device
- * will be clean up and disconnected in BH. This can be useful in
- * the vhost migration code. If disconnect was caught there is an
- * option for the general vhost code to get the dev state without
- * knowing its type (in this case vhost-user).
- *
- * FIXME: this is sketchy to be reaching into vhost_dev
- * now because we are forcing something that implies we
- * have executed vhost_dev_stop() but that won't happen
- * until vhost_user_blk_stop() gets called from the bh.
- * Really this state check should be tracked locally.
- */
- s->dev.started = false;
- }
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &s->chardev, &s->dev,
+ vhost_user_blk_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 26e4930676..043058ff43 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -259,7 +259,7 @@ static int vhost_net_start_one(struct vhost_net *net,
goto fail_notifiers;
}
- r = vhost_dev_start(&net->dev, dev);
+ r = vhost_dev_start(&net->dev, dev, false);
if (r < 0) {
goto fail_start;
}
@@ -308,7 +308,7 @@ fail:
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev);
+ vhost_dev_stop(&net->dev, dev, false);
fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
@@ -329,7 +329,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev);
+ vhost_dev_stop(&net->dev, dev, false);
if (net->nc->info->stop) {
net->nc->info->stop(net->nc);
}
@@ -606,7 +606,7 @@ err_start:
assert(r >= 0);
}
- vhost_dev_stop(&net->dev, vdev);
+ vhost_dev_stop(&net->dev, vdev, false);
return r;
}
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index 767f827e55..18ea5dcfa1 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -68,7 +68,7 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
goto err_guest_notifiers;
}
- ret = vhost_dev_start(&vsc->dev, vdev);
+ ret = vhost_dev_start(&vsc->dev, vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
goto err_guest_notifiers;
@@ -101,7 +101,7 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
- vhost_dev_stop(&vsc->dev, vdev);
+ vhost_dev_stop(&vsc->dev, vdev, true);
if (k->set_guest_notifiers) {
ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 820dadc26c..14fc5b9bb2 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -9,8 +9,8 @@ vhost_section(const char *name) "%s"
vhost_reject_section(const char *name, int d) "%s:%d"
vhost_iotlb_miss(void *dev, int step) "%p step %d"
vhost_dev_cleanup(void *dev) "%p"
-vhost_dev_start(void *dev, const char *name) "%p:%s"
-vhost_dev_stop(void *dev, const char *name) "%p:%s"
+vhost_dev_start(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
+vhost_dev_stop(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
# vhost-user.c
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index dc4014cdef..d97b179e6f 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -76,7 +76,7 @@ static void vuf_start(VirtIODevice *vdev)
}
fs->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&fs->vhost_dev, vdev);
+ ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -110,7 +110,7 @@ static void vuf_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&fs->vhost_dev, vdev);
+ vhost_dev_stop(&fs->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
index 5851cb3bc9..b7b82a1099 100644
--- a/hw/virtio/vhost-user-gpio.c
+++ b/hw/virtio/vhost-user-gpio.c
@@ -81,11 +81,12 @@ static int vu_gpio_start(VirtIODevice *vdev)
*/
vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features);
- ret = vhost_dev_start(&gpio->vhost_dev, vdev);
+ ret = vhost_dev_start(&gpio->vhost_dev, vdev, false);
if (ret < 0) {
error_report("Error starting vhost-user-gpio: %d", ret);
goto err_guest_notifiers;
}
+ gpio->started_vu = true;
/*
* guest_notifier_mask/pending not used yet, so just unmask
@@ -126,20 +127,16 @@ static void vu_gpio_stop(VirtIODevice *vdev)
struct vhost_dev *vhost_dev = &gpio->vhost_dev;
int ret;
- if (!k->set_guest_notifiers) {
+ if (!gpio->started_vu) {
return;
}
+ gpio->started_vu = false;
- /*
- * We can call vu_gpio_stop multiple times, for example from
- * vm_state_notify and the final object finalisation. Check we
- * aren't already stopped before doing so.
- */
- if (!vhost_dev_is_started(vhost_dev)) {
+ if (!k->set_guest_notifiers) {
return;
}
- vhost_dev_stop(vhost_dev, vdev);
+ vhost_dev_stop(vhost_dev, vdev, false);
ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
if (ret < 0) {
@@ -236,6 +233,8 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
return 0;
}
+static void vu_gpio_event(void *opaque, QEMUChrEvent event);
+
static void vu_gpio_disconnect(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -248,6 +247,11 @@ static void vu_gpio_disconnect(DeviceState *dev)
vu_gpio_stop(vdev);
vhost_dev_cleanup(&gpio->vhost_dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&gpio->chardev,
+ NULL, NULL, vu_gpio_event,
+ NULL, dev, NULL, true);
}
static void vu_gpio_event(void *opaque, QEMUChrEvent event)
@@ -265,7 +269,9 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- vu_gpio_disconnect(dev);
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
+ vu_gpio_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
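The vhost-user-gpio hunk above swaps the old vhost_dev_is_started() check for a device-local started_vu flag, so vu_gpio_stop() stays safe to call more than once (for example from vm_state_notify and again at finalisation) even after the shared vhost_dev state has been torn down. A minimal stand-alone sketch of that guard-flag pattern follows; the names are hypothetical and this is not QEMU code:

/*
 * Sketch of a local "started" guard: stop() can be reached from
 * several paths, so a flag owned by the device decides whether any
 * teardown is still pending.
 */
#include <stdbool.h>
#include <stdio.h>

struct gpio_dev {
    bool started_vu;            /* set by start(), cleared by stop() */
};

static void gpio_start(struct gpio_dev *g)
{
    /* ... backend start elided ... */
    g->started_vu = true;
}

static void gpio_stop(struct gpio_dev *g)
{
    if (!g->started_vu) {
        return;                 /* already stopped: later calls are no-ops */
    }
    g->started_vu = false;
    printf("tearing down backend\n");
}

int main(void)
{
    struct gpio_dev g = { false };

    gpio_start(&g);
    gpio_stop(&g);              /* does the real teardown */
    gpio_stop(&g);              /* safe no-op, e.g. at finalisation */
    return 0;
}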
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index 1c9f3d20dc..dc5c828ba6 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -46,7 +46,7 @@ static void vu_i2c_start(VirtIODevice *vdev)
i2c->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&i2c->vhost_dev, vdev);
+ ret = vhost_dev_start(&i2c->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-i2c: %d", -ret);
goto err_guest_notifiers;
@@ -80,7 +80,7 @@ static void vu_i2c_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&i2c->vhost_dev, vdev);
+ vhost_dev_stop(&i2c->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c
index f9084cde58..201a39e220 100644
--- a/hw/virtio/vhost-user-rng.c
+++ b/hw/virtio/vhost-user-rng.c
@@ -47,7 +47,7 @@ static void vu_rng_start(VirtIODevice *vdev)
}
rng->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&rng->vhost_dev, vdev);
+ ret = vhost_dev_start(&rng->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-rng: %d", -ret);
goto err_guest_notifiers;
@@ -81,7 +81,7 @@ static void vu_rng_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&rng->vhost_dev, vdev);
+ vhost_dev_stop(&rng->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index abe23d4ebe..8f635844af 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -21,6 +21,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
+#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
@@ -2670,6 +2671,76 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
+
+typedef struct {
+ vu_async_close_fn cb;
+ DeviceState *dev;
+ CharBackend *cd;
+ struct vhost_dev *vhost;
+} VhostAsyncCallback;
+
+static void vhost_user_async_close_bh(void *opaque)
+{
+ VhostAsyncCallback *data = opaque;
+ struct vhost_dev *vhost = data->vhost;
+
+ /*
+ * If the vhost_dev has been cleared in the meantime there is
+ * nothing left to do as some other path has completed the
+ * cleanup.
+ */
+ if (vhost->vdev) {
+ data->cb(data->dev);
+ }
+
+ g_free(data);
+}
+
+/*
+ * We only schedule the work if the machine is running. If suspended
+ * we want to keep all the in-flight data as is for migration
+ * purposes.
+ */
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb)
+{
+ if (!runstate_check(RUN_STATE_SHUTDOWN)) {
+ /*
+ * A close event may happen during a read/write, but vhost
+ * code assumes the vhost_dev remains setup, so delay the
+ * stop & clear.
+ */
+ AioContext *ctx = qemu_get_current_aio_context();
+ VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
+
+ /* Save data for the callback */
+ data->cb = cb;
+ data->dev = d;
+ data->cd = chardev;
+ data->vhost = vhost;
+
+ /* Disable any further notifications on the chardev */
+ qemu_chr_fe_set_handlers(chardev,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ false);
+
+ aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
+
+ /*
+ * Move vhost device to the stopped state. The vhost-user device
+ * will be clean up and disconnected in BH. This can be useful in
+ * the vhost migration code. If disconnect was caught there is an
+ * option for the general vhost code to get the dev state without
+ * knowing its type (in this case vhost-user).
+ *
+ * Note if the vhost device is fully cleared by the time we
+ * execute the bottom half we won't continue with the cleanup.
+ */
+ vhost->started = false;
+ }
+}
+
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
if (!virtio_has_feature(dev->protocol_features,
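The new vhost_user_async_close() above centralises the CHR_EVENT_CLOSED handling that vhost-user-blk used to open-code: the event handler only records the pending disconnect, marks the device stopped, and defers the real cleanup to a bottom half so the chardev is not torn down in the middle of a read or write. The following simplified, self-contained sketch of that defer-to-later pattern uses a plain pending-callback slot as a stand-in for QEMU's AioContext bottom halves; it is an illustration, not the actual implementation:

/*
 * Illustration only: a single pending-callback slot stands in for a
 * scheduled bottom half.  The close event only records what to do;
 * the real disconnect runs once the current I/O path has unwound.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void (*close_fn)(void *dev);

struct pending_close {
    close_fn cb;
    void *dev;
};

static struct pending_close *pending;     /* stand-in for a scheduled BH */

static void async_close(void *dev, close_fn cb)
{
    /* Record the disconnect instead of running it in the event handler. */
    pending = malloc(sizeof(*pending));
    pending->cb = cb;
    pending->dev = dev;
}

static void run_deferred(void)            /* stand-in for the main loop */
{
    if (pending) {
        pending->cb(pending->dev);
        free(pending);
        pending = NULL;
    }
}

static void blk_disconnect(void *dev)
{
    printf("disconnecting %s\n", (const char *)dev);
}

int main(void)
{
    /* CHR_EVENT_CLOSED arrives mid-I/O: just schedule the disconnect. */
    async_close("vhost-user-blk", blk_disconnect);

    /* Later, outside the I/O path, the deferred work actually runs. */
    run_deferred();
    return 0;
}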
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index a67a275de2..d21c72b401 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -70,7 +70,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev)
}
vvc->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&vvc->vhost_dev, vdev);
+ ret = vhost_dev_start(&vvc->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -105,7 +105,7 @@ void vhost_vsock_common_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&vvc->vhost_dev, vdev);
+ vhost_dev_stop(&vvc->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index d1c4c20b8c..7fb008bc9e 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1777,15 +1777,36 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
return 0;
}
+static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
+{
+ if (!hdev->vhost_ops->vhost_set_vring_enable) {
+ return 0;
+ }
+
+ /*
+ * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
+ * been negotiated, the rings start directly in the enabled state, and
+ * .vhost_set_vring_enable callback will fail since
+ * VHOST_USER_SET_VRING_ENABLE is not supported.
+ */
+ if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
+ !virtio_has_feature(hdev->backend_features,
+ VHOST_USER_F_PROTOCOL_FEATURES)) {
+ return 0;
+ }
+
+ return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
+}
+
/* Host notifiers must be enabled at this point. */
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i, r;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
- trace_vhost_dev_start(hdev, vdev->name);
+ trace_vhost_dev_start(hdev, vdev->name, vrings);
vdev->vhost_started = true;
hdev->started = true;
@@ -1830,10 +1851,16 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
goto fail_log;
}
}
+ if (vrings) {
+ r = vhost_dev_set_vring_enable(hdev, true);
+ if (r) {
+ goto fail_log;
+ }
+ }
if (hdev->vhost_ops->vhost_dev_start) {
r = hdev->vhost_ops->vhost_dev_start(hdev, true);
if (r) {
- goto fail_log;
+ goto fail_start;
}
}
if (vhost_dev_has_iommu(hdev) &&
@@ -1848,6 +1875,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
return 0;
+fail_start:
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
fail_log:
vhost_log_put(hdev, false);
fail_vq:
@@ -1866,18 +1897,21 @@ fail_features:
}
/* Host notifiers must be enabled at this point. */
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
- trace_vhost_dev_stop(hdev, vdev->name);
+ trace_vhost_dev_stop(hdev, vdev->name, vrings);
if (hdev->vhost_ops->vhost_dev_start) {
hdev->vhost_ops->vhost_dev_start(hdev, false);
}
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,