-rw-r--r--  backends/cryptodev-vhost.c            |   4
-rw-r--r--  backends/vhost-user.c                 |   4
-rw-r--r--  hw/block/vhost-user-blk.c             |  45
-rw-r--r--  hw/net/vhost_net.c                    |   8
-rw-r--r--  hw/nvme/ctrl.c                        | 182
-rw-r--r--  hw/scsi/vhost-scsi-common.c           |   4
-rw-r--r--  hw/virtio/trace-events                |   4
-rw-r--r--  hw/virtio/vhost-user-fs.c             |   4
-rw-r--r--  hw/virtio/vhost-user-gpio.c           |  26
-rw-r--r--  hw/virtio/vhost-user-i2c.c            |   4
-rw-r--r--  hw/virtio/vhost-user-rng.c            |   4
-rw-r--r--  hw/virtio/vhost-user.c                |  71
-rw-r--r--  hw/virtio/vhost-vsock-common.c        |   4
-rw-r--r--  hw/virtio/vhost.c                     |  44
-rw-r--r--  include/hw/virtio/vhost-user-gpio.h   |  10
-rw-r--r--  include/hw/virtio/vhost-user.h        |  18
-rw-r--r--  include/hw/virtio/vhost.h             |   6
-rw-r--r--  include/hw/virtio/virtio.h            |  23
-rw-r--r--  target/i386/tcg/decode-new.c.inc      |   3
-rw-r--r--  target/i386/tcg/sysemu/excp_helper.c  |  34
-rw-r--r--  tests/qtest/libqos/virtio-gpio.c      |   3
21 files changed, 277 insertions(+), 228 deletions(-)
diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c
index bc13e466b4..572f87b3be 100644
--- a/backends/cryptodev-vhost.c
+++ b/backends/cryptodev-vhost.c
@@ -94,7 +94,7 @@ cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
goto fail_notifiers;
}
- r = vhost_dev_start(&crypto->dev, dev);
+ r = vhost_dev_start(&crypto->dev, dev, false);
if (r < 0) {
goto fail_start;
}
@@ -111,7 +111,7 @@ static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
VirtIODevice *dev)
{
- vhost_dev_stop(&crypto->dev, dev);
+ vhost_dev_stop(&crypto->dev, dev, false);
vhost_dev_disable_notifiers(&crypto->dev, dev);
}
diff --git a/backends/vhost-user.c b/backends/vhost-user.c
index 5dedb2d987..7bfcaef976 100644
--- a/backends/vhost-user.c
+++ b/backends/vhost-user.c
@@ -85,7 +85,7 @@ vhost_user_backend_start(VhostUserBackend *b)
}
b->dev.acked_features = b->vdev->guest_features;
- ret = vhost_dev_start(&b->dev, b->vdev);
+ ret = vhost_dev_start(&b->dev, b->vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
goto err_guest_notifiers;
@@ -120,7 +120,7 @@ vhost_user_backend_stop(VhostUserBackend *b)
return;
}
- vhost_dev_stop(&b->dev, b->vdev);
+ vhost_dev_stop(&b->dev, b->vdev, true);
if (k->set_guest_notifiers) {
ret = k->set_guest_notifiers(qbus->parent,
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 0d5190accf..aff4d2b8cb 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -178,7 +178,7 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
}
s->dev.vq_index_end = s->dev.nvqs;
- ret = vhost_dev_start(&s->dev, vdev);
+ ret = vhost_dev_start(&s->dev, vdev, true);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error starting vhost");
goto err_guest_notifiers;
@@ -213,7 +213,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&s->dev, vdev);
+ vhost_dev_stop(&s->dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
if (ret < 0) {
@@ -369,17 +369,10 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
vhost_user_blk_stop(vdev);
vhost_dev_cleanup(&s->dev);
-}
-static void vhost_user_blk_chr_closed_bh(void *opaque)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBlk *s = VHOST_USER_BLK(vdev);
-
- vhost_user_blk_disconnect(dev);
+ /* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
- NULL, opaque, NULL, true);
+ NULL, dev, NULL, true);
}
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
@@ -398,33 +391,9 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- if (!runstate_check(RUN_STATE_SHUTDOWN)) {
- /*
- * A close event may happen during a read/write, but vhost
- * code assumes the vhost_dev remains setup, so delay the
- * stop & clear.
- */
- AioContext *ctx = qemu_get_current_aio_context();
-
- qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
- NULL, NULL, false);
- aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);
-
- /*
- * Move vhost device to the stopped state. The vhost-user device
- * will be clean up and disconnected in BH. This can be useful in
- * the vhost migration code. If disconnect was caught there is an
- * option for the general vhost code to get the dev state without
- * knowing its type (in this case vhost-user).
- *
- * FIXME: this is sketchy to be reaching into vhost_dev
- * now because we are forcing something that implies we
- * have executed vhost_dev_stop() but that won't happen
- * until vhost_user_blk_stop() gets called from the bh.
- * Really this state check should be tracked locally.
- */
- s->dev.started = false;
- }
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &s->chardev, &s->dev,
+ vhost_user_blk_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 26e4930676..043058ff43 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -259,7 +259,7 @@ static int vhost_net_start_one(struct vhost_net *net,
goto fail_notifiers;
}
- r = vhost_dev_start(&net->dev, dev);
+ r = vhost_dev_start(&net->dev, dev, false);
if (r < 0) {
goto fail_start;
}
@@ -308,7 +308,7 @@ fail:
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev);
+ vhost_dev_stop(&net->dev, dev, false);
fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
@@ -329,7 +329,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
- vhost_dev_stop(&net->dev, dev);
+ vhost_dev_stop(&net->dev, dev, false);
if (net->nc->info->stop) {
net->nc->info->stop(net->nc);
}
@@ -606,7 +606,7 @@ err_start:
assert(r >= 0);
}
- vhost_dev_stop(&net->dev, vdev);
+ vhost_dev_stop(&net->dev, vdev, false);
return r;
}
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index ac3885ce50..e54276dc1d 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -2329,7 +2329,6 @@ typedef struct NvmeDSMAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
- QEMUBH *bh;
int ret;
NvmeDsmRange *range;
@@ -2351,7 +2350,7 @@ static void nvme_dsm_cancel(BlockAIOCB *aiocb)
} else {
/*
* We only reach this if nvme_dsm_cancel() has already been called or
- * the command ran to completion and nvme_dsm_bh is scheduled to run.
+ * the command ran to completion.
*/
assert(iocb->idx == iocb->nr);
}
@@ -2362,17 +2361,6 @@ static const AIOCBInfo nvme_dsm_aiocb_info = {
.cancel_async = nvme_dsm_cancel,
};
-static void nvme_dsm_bh(void *opaque)
-{
- NvmeDSMAIOCB *iocb = opaque;
-
- iocb->common.cb(iocb->common.opaque, iocb->ret);
-
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
- qemu_aio_unref(iocb);
-}
-
static void nvme_dsm_cb(void *opaque, int ret);
static void nvme_dsm_md_cb(void *opaque, int ret)
@@ -2384,16 +2372,10 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
uint64_t slba;
uint32_t nlb;
- if (ret < 0) {
- iocb->ret = ret;
+ if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
goto done;
}
- if (!ns->lbaf.ms) {
- nvme_dsm_cb(iocb, 0);
- return;
- }
-
range = &iocb->range[iocb->idx - 1];
slba = le64_to_cpu(range->slba);
nlb = le32_to_cpu(range->nlb);
@@ -2406,7 +2388,6 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
if (ret) {
if (ret < 0) {
- iocb->ret = ret;
goto done;
}
@@ -2420,8 +2401,7 @@ static void nvme_dsm_md_cb(void *opaque, int ret)
return;
done:
- iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ nvme_dsm_cb(iocb, ret);
}
static void nvme_dsm_cb(void *opaque, int ret)
@@ -2434,7 +2414,9 @@ static void nvme_dsm_cb(void *opaque, int ret)
uint64_t slba;
uint32_t nlb;
- if (ret < 0) {
+ if (iocb->ret < 0) {
+ goto done;
+ } else if (ret < 0) {
iocb->ret = ret;
goto done;
}
@@ -2468,7 +2450,8 @@ next:
done:
iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+ qemu_aio_unref(iocb);
}
static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
@@ -2486,7 +2469,6 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
nvme_misc_cb, req);
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb);
iocb->ret = 0;
iocb->range = g_new(NvmeDsmRange, nr);
iocb->nr = nr;
@@ -2570,7 +2552,6 @@ typedef struct NvmeCopyAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
- QEMUBH *bh;
int ret;
void *ranges;
@@ -2608,9 +2589,8 @@ static const AIOCBInfo nvme_copy_aiocb_info = {
.cancel_async = nvme_copy_cancel,
};
-static void nvme_copy_bh(void *opaque)
+static void nvme_copy_done(NvmeCopyAIOCB *iocb)
{
- NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);
@@ -2622,9 +2602,6 @@ static void nvme_copy_bh(void *opaque)
qemu_iovec_destroy(&iocb->iov);
g_free(iocb->bounce);
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
-
if (iocb->ret < 0) {
block_acct_failed(stats, &iocb->acct.read);
block_acct_failed(stats, &iocb->acct.write);
@@ -2637,7 +2614,7 @@ static void nvme_copy_bh(void *opaque)
qemu_aio_unref(iocb);
}
-static void nvme_copy_cb(void *opaque, int ret);
+static void nvme_do_copy(NvmeCopyAIOCB *iocb);
static void nvme_copy_source_range_parse_format0(void *ranges, int idx,
uint64_t *slba, uint32_t *nlb,
@@ -2749,7 +2726,7 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret)
iocb->idx++;
iocb->slba += nlb;
out:
- nvme_copy_cb(iocb, iocb->ret);
+ nvme_do_copy(iocb);
}
static void nvme_copy_out_cb(void *opaque, int ret)
@@ -2761,16 +2738,8 @@ static void nvme_copy_out_cb(void *opaque, int ret)
size_t mlen;
uint8_t *mbounce;
- if (ret < 0) {
- iocb->ret = ret;
+ if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
goto out;
- } else if (iocb->ret < 0) {
- goto out;
- }
-
- if (!ns->lbaf.ms) {
- nvme_copy_out_completed_cb(iocb, 0);
- return;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL,
@@ -2789,7 +2758,7 @@ static void nvme_copy_out_cb(void *opaque, int ret)
return;
out:
- nvme_copy_cb(iocb, ret);
+ nvme_copy_out_completed_cb(iocb, ret);
}
static void nvme_copy_in_completed_cb(void *opaque, int ret)
@@ -2883,15 +2852,9 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret)
invalid:
req->status = status;
- iocb->aiocb = NULL;
- if (iocb->bh) {
- qemu_bh_schedule(iocb->bh);
- }
-
- return;
-
+ iocb->ret = -1;
out:
- nvme_copy_cb(iocb, ret);
+ nvme_do_copy(iocb);
}
static void nvme_copy_in_cb(void *opaque, int ret)
@@ -2902,16 +2865,8 @@ static void nvme_copy_in_cb(void *opaque, int ret)
uint64_t slba;
uint32_t nlb;
- if (ret < 0) {
- iocb->ret = ret;
+ if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
goto out;
- } else if (iocb->ret < 0) {
- goto out;
- }
-
- if (!ns->lbaf.ms) {
- nvme_copy_in_completed_cb(iocb, 0);
- return;
}
nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba,
@@ -2927,12 +2882,11 @@ static void nvme_copy_in_cb(void *opaque, int ret)
return;
out:
- nvme_copy_cb(iocb, iocb->ret);
+ nvme_copy_in_completed_cb(iocb, ret);
}
-static void nvme_copy_cb(void *opaque, int ret)
+static void nvme_do_copy(NvmeCopyAIOCB *iocb)
{
- NvmeCopyAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
uint64_t slba;
@@ -2940,10 +2894,7 @@ static void nvme_copy_cb(void *opaque, int ret)
size_t len;
uint16_t status;
- if (ret < 0) {
- iocb->ret = ret;
- goto done;
- } else if (iocb->ret < 0) {
+ if (iocb->ret < 0) {
goto done;
}
@@ -2990,14 +2941,11 @@ static void nvme_copy_cb(void *opaque, int ret)
invalid:
req->status = status;
+ iocb->ret = -1;
done:
- iocb->aiocb = NULL;
- if (iocb->bh) {
- qemu_bh_schedule(iocb->bh);
- }
+ nvme_copy_done(iocb);
}
-
static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns = req->ns;
@@ -3067,7 +3015,6 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
}
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_copy_bh, iocb);
iocb->ret = 0;
iocb->nr = nr;
iocb->idx = 0;
@@ -3084,7 +3031,7 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
BLOCK_ACCT_WRITE);
req->aiocb = &iocb->common;
- nvme_copy_cb(iocb, 0);
+ nvme_do_copy(iocb);
return NVME_NO_COMPLETE;
@@ -3160,7 +3107,6 @@ typedef struct NvmeFlushAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
- QEMUBH *bh;
int ret;
NvmeNamespace *ns;
@@ -3176,6 +3122,7 @@ static void nvme_flush_cancel(BlockAIOCB *acb)
if (iocb->aiocb) {
blk_aio_cancel_async(iocb->aiocb);
+ iocb->aiocb = NULL;
}
}
@@ -3185,6 +3132,8 @@ static const AIOCBInfo nvme_flush_aiocb_info = {
.get_aio_context = nvme_get_aio_context,
};
+static void nvme_do_flush(NvmeFlushAIOCB *iocb);
+
static void nvme_flush_ns_cb(void *opaque, int ret)
{
NvmeFlushAIOCB *iocb = opaque;
@@ -3206,13 +3155,11 @@ static void nvme_flush_ns_cb(void *opaque, int ret)
}
out:
- iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ nvme_do_flush(iocb);
}
-static void nvme_flush_bh(void *opaque)
+static void nvme_do_flush(NvmeFlushAIOCB *iocb)
{
- NvmeFlushAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeCtrl *n = nvme_ctrl(req);
int i;
@@ -3239,14 +3186,8 @@ static void nvme_flush_bh(void *opaque)
return;
done:
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
-
iocb->common.cb(iocb->common.opaque, iocb->ret);
-
qemu_aio_unref(iocb);
-
- return;
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
@@ -3258,7 +3199,6 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req);
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_flush_bh, iocb);
iocb->ret = 0;
iocb->ns = NULL;
iocb->nsid = 0;
@@ -3280,13 +3220,11 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
}
req->aiocb = &iocb->common;
- qemu_bh_schedule(iocb->bh);
+ nvme_do_flush(iocb);
return NVME_NO_COMPLETE;
out:
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
qemu_aio_unref(iocb);
return status;
@@ -3721,7 +3659,6 @@ typedef struct NvmeZoneResetAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
NvmeRequest *req;
- QEMUBH *bh;
int ret;
bool all;
@@ -3750,17 +3687,6 @@ static const AIOCBInfo nvme_zone_reset_aiocb_info = {
.cancel_async = nvme_zone_reset_cancel,
};
-static void nvme_zone_reset_bh(void *opaque)
-{
- NvmeZoneResetAIOCB *iocb = opaque;
-
- iocb->common.cb(iocb->common.opaque, iocb->ret);
-
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
- qemu_aio_unref(iocb);
-}
-
static void nvme_zone_reset_cb(void *opaque, int ret);
static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
@@ -3771,14 +3697,8 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
int64_t moff;
int count;
- if (ret < 0) {
- nvme_zone_reset_cb(iocb, ret);
- return;
- }
-
- if (!ns->lbaf.ms) {
- nvme_zone_reset_cb(iocb, 0);
- return;
+ if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) {
+ goto out;
}
moff = nvme_moff(ns, iocb->zone->d.zslba);
@@ -3788,6 +3708,9 @@ static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
BDRV_REQ_MAY_UNMAP,
nvme_zone_reset_cb, iocb);
return;
+
+out:
+ nvme_zone_reset_cb(iocb, ret);
}
static void nvme_zone_reset_cb(void *opaque, int ret)
@@ -3796,7 +3719,9 @@ static void nvme_zone_reset_cb(void *opaque, int ret)
NvmeRequest *req = iocb->req;
NvmeNamespace *ns = req->ns;
- if (ret < 0) {
+ if (iocb->ret < 0) {
+ goto done;
+ } else if (ret < 0) {
iocb->ret = ret;
goto done;
}
@@ -3844,9 +3769,9 @@ static void nvme_zone_reset_cb(void *opaque, int ret)
done:
iocb->aiocb = NULL;
- if (iocb->bh) {
- qemu_bh_schedule(iocb->bh);
- }
+
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+ qemu_aio_unref(iocb);
}
static uint16_t nvme_zone_mgmt_send_zrwa_flush(NvmeCtrl *n, NvmeZone *zone,
@@ -3951,7 +3876,6 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
nvme_misc_cb, req);
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb);
iocb->ret = 0;
iocb->all = all;
iocb->idx = zone_idx;
@@ -5741,7 +5665,6 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
typedef struct NvmeFormatAIOCB {
BlockAIOCB common;
BlockAIOCB *aiocb;
- QEMUBH *bh;
NvmeRequest *req;
int ret;
@@ -5756,14 +5679,15 @@ typedef struct NvmeFormatAIOCB {
uint8_t pil;
} NvmeFormatAIOCB;
-static void nvme_format_bh(void *opaque);
-
static void nvme_format_cancel(BlockAIOCB *aiocb)
{
NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common);
+ iocb->ret = -ECANCELED;
+
if (iocb->aiocb) {
blk_aio_cancel_async(iocb->aiocb);
+ iocb->aiocb = NULL;
}
}
@@ -5787,13 +5711,17 @@ static void nvme_format_set(NvmeNamespace *ns, uint8_t lbaf, uint8_t mset,
nvme_ns_init_format(ns);
}
+static void nvme_do_format(NvmeFormatAIOCB *iocb);
+
static void nvme_format_ns_cb(void *opaque, int ret)
{
NvmeFormatAIOCB *iocb = opaque;
NvmeNamespace *ns = iocb->ns;
int bytes;
- if (ret < 0) {
+ if (iocb->ret < 0) {
+ goto done;
+ } else if (ret < 0) {
iocb->ret = ret;
goto done;
}
@@ -5817,8 +5745,7 @@ static void nvme_format_ns_cb(void *opaque, int ret)
iocb->offset = 0;
done:
- iocb->aiocb = NULL;
- qemu_bh_schedule(iocb->bh);
+ nvme_do_format(iocb);
}
static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
@@ -5842,9 +5769,8 @@ static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
return NVME_SUCCESS;
}
-static void nvme_format_bh(void *opaque)
+static void nvme_do_format(NvmeFormatAIOCB *iocb)
{
- NvmeFormatAIOCB *iocb = opaque;
NvmeRequest *req = iocb->req;
NvmeCtrl *n = nvme_ctrl(req);
uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
@@ -5882,11 +5808,7 @@ static void nvme_format_bh(void *opaque)
return;
done:
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
-
iocb->common.cb(iocb->common.opaque, iocb->ret);
-
qemu_aio_unref(iocb);
}
@@ -5905,7 +5827,6 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req);
iocb->req = req;
- iocb->bh = qemu_bh_new(nvme_format_bh, iocb);
iocb->ret = 0;
iocb->ns = NULL;
iocb->nsid = 0;
@@ -5934,14 +5855,13 @@ static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
}
req->aiocb = &iocb->common;
- qemu_bh_schedule(iocb->bh);
+ nvme_do_format(iocb);
return NVME_NO_COMPLETE;
out:
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
qemu_aio_unref(iocb);
+
return status;
}
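
Every hw/nvme/ctrl.c hunk above applies the same transformation: the per-request QEMUBH is dropped and the final AIO callback completes the request inline. A condensed sketch of the pattern these hunks converge on; NvmeExampleAIOCB and nvme_example_cb are illustrative stand-ins for the patched AIOCBs (DSM, copy, flush, zone reset, format), not names from the patch:

    typedef struct NvmeExampleAIOCB {
        BlockAIOCB common;      /* embedded generic AIOCB */
        BlockAIOCB *aiocb;      /* currently outstanding request, if any */
        int ret;                /* first error is latched here */
    } NvmeExampleAIOCB;

    static void nvme_example_cb(void *opaque, int ret)
    {
        NvmeExampleAIOCB *iocb = opaque;

        /* Latch the first error; later stages check iocb->ret first. */
        if (ret < 0 && iocb->ret == 0) {
            iocb->ret = ret;
        }

        /* ... issue the next stage and return early if work remains ... */

        /* Complete inline instead of scheduling a QEMUBH. */
        iocb->aiocb = NULL;
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        qemu_aio_unref(iocb);
    }
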
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index 767f827e55..18ea5dcfa1 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -68,7 +68,7 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
goto err_guest_notifiers;
}
- ret = vhost_dev_start(&vsc->dev, vdev);
+ ret = vhost_dev_start(&vsc->dev, vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
goto err_guest_notifiers;
@@ -101,7 +101,7 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
- vhost_dev_stop(&vsc->dev, vdev);
+ vhost_dev_stop(&vsc->dev, vdev, true);
if (k->set_guest_notifiers) {
ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 820dadc26c..14fc5b9bb2 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -9,8 +9,8 @@ vhost_section(const char *name) "%s"
vhost_reject_section(const char *name, int d) "%s:%d"
vhost_iotlb_miss(void *dev, int step) "%p step %d"
vhost_dev_cleanup(void *dev) "%p"
-vhost_dev_start(void *dev, const char *name) "%p:%s"
-vhost_dev_stop(void *dev, const char *name) "%p:%s"
+vhost_dev_start(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
+vhost_dev_stop(void *dev, const char *name, bool vrings) "%p:%s vrings:%d"
# vhost-user.c
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index dc4014cdef..d97b179e6f 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -76,7 +76,7 @@ static void vuf_start(VirtIODevice *vdev)
}
fs->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&fs->vhost_dev, vdev);
+ ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -110,7 +110,7 @@ static void vuf_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&fs->vhost_dev, vdev);
+ vhost_dev_stop(&fs->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
index 5851cb3bc9..b7b82a1099 100644
--- a/hw/virtio/vhost-user-gpio.c
+++ b/hw/virtio/vhost-user-gpio.c
@@ -81,11 +81,12 @@ static int vu_gpio_start(VirtIODevice *vdev)
*/
vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features);
- ret = vhost_dev_start(&gpio->vhost_dev, vdev);
+ ret = vhost_dev_start(&gpio->vhost_dev, vdev, false);
if (ret < 0) {
error_report("Error starting vhost-user-gpio: %d", ret);
goto err_guest_notifiers;
}
+ gpio->started_vu = true;
/*
* guest_notifier_mask/pending not used yet, so just unmask
@@ -126,20 +127,16 @@ static void vu_gpio_stop(VirtIODevice *vdev)
struct vhost_dev *vhost_dev = &gpio->vhost_dev;
int ret;
- if (!k->set_guest_notifiers) {
+ if (!gpio->started_vu) {
return;
}
+ gpio->started_vu = false;
- /*
- * We can call vu_gpio_stop multiple times, for example from
- * vm_state_notify and the final object finalisation. Check we
- * aren't already stopped before doing so.
- */
- if (!vhost_dev_is_started(vhost_dev)) {
+ if (!k->set_guest_notifiers) {
return;
}
- vhost_dev_stop(vhost_dev, vdev);
+ vhost_dev_stop(vhost_dev, vdev, false);
ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
if (ret < 0) {
@@ -236,6 +233,8 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp)
return 0;
}
+static void vu_gpio_event(void *opaque, QEMUChrEvent event);
+
static void vu_gpio_disconnect(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -248,6 +247,11 @@ static void vu_gpio_disconnect(DeviceState *dev)
vu_gpio_stop(vdev);
vhost_dev_cleanup(&gpio->vhost_dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&gpio->chardev,
+ NULL, NULL, vu_gpio_event,
+ NULL, dev, NULL, true);
}
static void vu_gpio_event(void *opaque, QEMUChrEvent event)
@@ -265,7 +269,9 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
}
break;
case CHR_EVENT_CLOSED:
- vu_gpio_disconnect(dev);
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
+ vu_gpio_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index 1c9f3d20dc..dc5c828ba6 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -46,7 +46,7 @@ static void vu_i2c_start(VirtIODevice *vdev)
i2c->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&i2c->vhost_dev, vdev);
+ ret = vhost_dev_start(&i2c->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-i2c: %d", -ret);
goto err_guest_notifiers;
@@ -80,7 +80,7 @@ static void vu_i2c_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&i2c->vhost_dev, vdev);
+ vhost_dev_stop(&i2c->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c
index f9084cde58..201a39e220 100644
--- a/hw/virtio/vhost-user-rng.c
+++ b/hw/virtio/vhost-user-rng.c
@@ -47,7 +47,7 @@ static void vu_rng_start(VirtIODevice *vdev)
}
rng->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&rng->vhost_dev, vdev);
+ ret = vhost_dev_start(&rng->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost-user-rng: %d", -ret);
goto err_guest_notifiers;
@@ -81,7 +81,7 @@ static void vu_rng_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&rng->vhost_dev, vdev);
+ vhost_dev_stop(&rng->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index abe23d4ebe..8f635844af 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -21,6 +21,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
+#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
@@ -2670,6 +2671,76 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
+
+typedef struct {
+ vu_async_close_fn cb;
+ DeviceState *dev;
+ CharBackend *cd;
+ struct vhost_dev *vhost;
+} VhostAsyncCallback;
+
+static void vhost_user_async_close_bh(void *opaque)
+{
+ VhostAsyncCallback *data = opaque;
+ struct vhost_dev *vhost = data->vhost;
+
+ /*
+ * If the vhost_dev has been cleared in the meantime there is
+ * nothing left to do as some other path has completed the
+ * cleanup.
+ */
+ if (vhost->vdev) {
+ data->cb(data->dev);
+ }
+
+ g_free(data);
+}
+
+/*
+ * We only schedule the work if the machine is running. If suspended
+ * we want to keep all the in-flight data as is for migration
+ * purposes.
+ */
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb)
+{
+ if (!runstate_check(RUN_STATE_SHUTDOWN)) {
+ /*
+ * A close event may happen during a read/write, but vhost
+ * code assumes the vhost_dev remains setup, so delay the
+ * stop & clear.
+ */
+ AioContext *ctx = qemu_get_current_aio_context();
+ VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
+
+ /* Save data for the callback */
+ data->cb = cb;
+ data->dev = d;
+ data->cd = chardev;
+ data->vhost = vhost;
+
+ /* Disable any further notifications on the chardev */
+ qemu_chr_fe_set_handlers(chardev,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ false);
+
+ aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
+
+ /*
+     * Move the vhost device to the stopped state. The vhost-user
+     * device will be cleaned up and disconnected in the BH. This can
+     * be useful for the vhost migration code: if a disconnect is
+     * caught, the general vhost code can query the device state
+     * without knowing its type (in this case vhost-user).
+ *
+ * Note if the vhost device is fully cleared by the time we
+ * execute the bottom half we won't continue with the cleanup.
+ */
+ vhost->started = false;
+ }
+}
+
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
if (!virtio_has_feature(dev->protocol_features,
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index a67a275de2..d21c72b401 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -70,7 +70,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev)
}
vvc->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&vvc->vhost_dev, vdev);
+ ret = vhost_dev_start(&vvc->vhost_dev, vdev, true);
if (ret < 0) {
error_report("Error starting vhost: %d", -ret);
goto err_guest_notifiers;
@@ -105,7 +105,7 @@ void vhost_vsock_common_stop(VirtIODevice *vdev)
return;
}
- vhost_dev_stop(&vvc->vhost_dev, vdev);
+ vhost_dev_stop(&vvc->vhost_dev, vdev, true);
ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false);
if (ret < 0) {
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index d1c4c20b8c..7fb008bc9e 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1777,15 +1777,36 @@ int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
return 0;
}
+static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
+{
+ if (!hdev->vhost_ops->vhost_set_vring_enable) {
+ return 0;
+ }
+
+ /*
+ * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
+ * been negotiated, the rings start directly in the enabled state, and
+ * .vhost_set_vring_enable callback will fail since
+ * VHOST_USER_SET_VRING_ENABLE is not supported.
+ */
+ if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
+ !virtio_has_feature(hdev->backend_features,
+ VHOST_USER_F_PROTOCOL_FEATURES)) {
+ return 0;
+ }
+
+ return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
+}
+
/* Host notifiers must be enabled at this point. */
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i, r;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
- trace_vhost_dev_start(hdev, vdev->name);
+ trace_vhost_dev_start(hdev, vdev->name, vrings);
vdev->vhost_started = true;
hdev->started = true;
@@ -1830,10 +1851,16 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
goto fail_log;
}
}
+ if (vrings) {
+ r = vhost_dev_set_vring_enable(hdev, true);
+ if (r) {
+ goto fail_log;
+ }
+ }
if (hdev->vhost_ops->vhost_dev_start) {
r = hdev->vhost_ops->vhost_dev_start(hdev, true);
if (r) {
- goto fail_log;
+ goto fail_start;
}
}
if (vhost_dev_has_iommu(hdev) &&
@@ -1848,6 +1875,10 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
return 0;
+fail_start:
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
fail_log:
vhost_log_put(hdev, false);
fail_vq:
@@ -1866,18 +1897,21 @@ fail_features:
}
/* Host notifiers must be enabled at this point. */
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
- trace_vhost_dev_stop(hdev, vdev->name);
+ trace_vhost_dev_stop(hdev, vdev->name, vrings);
if (hdev->vhost_ops->vhost_dev_start) {
hdev->vhost_ops->vhost_dev_start(hdev, false);
}
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,
diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h
index 4fe9aeecc0..a9305c5e6c 100644
--- a/include/hw/virtio/vhost-user-gpio.h
+++ b/include/hw/virtio/vhost-user-gpio.h
@@ -28,7 +28,17 @@ struct VHostUserGPIO {
VhostUserState vhost_user;
VirtQueue *command_vq;
VirtQueue *interrupt_vq;
+ /**
+     * There are at least two steps of initialization of the
+     * vhost-user device. The first is a "connect" step and the
+     * second is a "start" step. Keep these initialization phases
+     * separate by tracking them in two fields.
+ *
+ * @connected: see vu_gpio_connect()/vu_gpio_disconnect()
+ * @started_vu: see vu_gpio_start()/vu_gpio_stop()
+ */
bool connected;
+ bool started_vu;
/*< public >*/
};
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index c6e693cd3f..191216a74f 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -68,4 +68,22 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
*/
void vhost_user_cleanup(VhostUserState *user);
+/**
+ * vhost_user_async_close() - cleanup vhost-user post connection drop
+ * @d: DeviceState for the associated device (passed to callback)
+ * @chardev: the CharBackend associated with the connection
+ * @vhost: the common vhost device
+ * @cb: the user callback function to complete the clean-up
+ *
+ * This function is used to handle the shutdown of a vhost-user
+ * connection to a backend. We handle this centrally to make sure we
+ * do all the steps and handle potential races due to VM shutdowns.
+ * Once the connection is disabled we schedule a bottom half to run
+ * the clean-up outside of the chardev callback context.
+ */
+typedef void (*vu_async_close_fn)(DeviceState *cb);
+
+void vhost_user_async_close(DeviceState *d,
+ CharBackend *chardev, struct vhost_dev *vhost,
+ vu_async_close_fn cb);
+
#endif
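
A minimal sketch of how a device backend is expected to use this helper, following the vhost-user-blk and vhost-user-gpio conversions above; VHostUserExample, vu_example_disconnect and the field names are illustrative, not part of the patch:

    static void vu_example_event(void *opaque, QEMUChrEvent event)
    {
        DeviceState *dev = opaque;
        VHostUserExample *s = VHOST_USER_EXAMPLE(dev); /* hypothetical */

        switch (event) {
        case CHR_EVENT_OPENED:
            /* connect to the backend, report errors, etc. */
            break;
        case CHR_EVENT_CLOSED:
            /* defer close until later to avoid circular close */
            vhost_user_async_close(dev, &s->chardev, &s->vhost_dev,
                                   vu_example_disconnect);
            break;
        default:
            break;
        }
    }

The helper disables further chardev events, marks the vhost device stopped, and runs the disconnect callback from a bottom half, so the callback never tears the device down from inside the chardev handler that triggered it.
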
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 353252ac3e..67a6807fac 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -184,24 +184,26 @@ static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
* vhost_dev_start() - start the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings enabled in this call
*
* Starts the vhost device. From this point VirtIO feature negotiation
* can start and the device can start processing VirtIO transactions.
*
* Return: 0 on success, < 0 on error.
*/
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* vhost_dev_stop() - stop the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
+ * @vrings: true to have vrings disabled in this call
*
* Stop the vhost device. After the device is stopped the notifiers
* can be disabled (@vhost_dev_disable_notifiers) and the device can
* be torn down (@vhost_dev_cleanup).
*/
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* DOC: vhost device configuration handling
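
For callers the new parameter pairs up across start and stop. A minimal sketch, assuming a vhost-user device that wants the core to drive ring enablement; example_dev_start/stop are illustrative names:

    static int example_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
    {
        /* vrings=true: also send the ring-enable to the backend */
        return vhost_dev_start(hdev, vdev, true);
    }

    static void example_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
    {
        /* mirror the start call: vrings are disabled before teardown */
        vhost_dev_stop(hdev, vdev, true);
    }

In this series most vhost-user devices pass true, while vhost_net, cryptodev-vhost and vhost-user-gpio pass false, keeping their existing ring-state handling.
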
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index a973811cbf..acfd4df125 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -116,6 +116,13 @@ struct VirtIODevice
bool broken; /* device in invalid state, needs reset */
bool use_disabled_flag; /* allow use of 'disable' flag when needed */
bool disabled; /* device in temporarily disabled state */
+ /**
+ * @use_started: true if the @started flag should be used to check the
+ * current state of the VirtIO device. Otherwise status bits
+ * should be checked for a current status of the device.
+ * @use_started is only set via QMP and defaults to true for all
+ * modern machines (since 4.1).
+ */
bool use_started;
bool started;
bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
@@ -391,6 +398,16 @@ static inline bool virtio_is_big_endian(VirtIODevice *vdev)
return false;
}
+/**
+ * virtio_device_started() - check if device started
+ * @vdev: the VirtIO device
+ * @status: the device's status bits
+ *
+ * Check if the device is started. For most modern machines this is
+ * tracked via the @vdev->started field (to support migration),
+ * otherwise we check for the final negotiated status bit that
+ * indicates everything is ready.
+ */
static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
{
if (vdev->use_started) {
@@ -411,15 +428,11 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
*/
static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
{
- if (vdev->use_started) {
- return vdev->started;
- }
-
if (!vdev->vm_running) {
return false;
}
- return status & VIRTIO_CONFIG_S_DRIVER_OK;
+ return virtio_device_started(vdev, status);
}
static inline void virtio_set_started(VirtIODevice *vdev, bool started)
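
After the refactor, virtio_device_should_start() is just virtio_device_started() gated on vm_running, so a vhost device's status hook can stay a simple toggle. A sketch with illustrative names (vu_example_*, VHostUserExample are not from the patch):

    static void vu_example_set_status(VirtIODevice *vdev, uint8_t status)
    {
        VHostUserExample *s = VHOST_USER_EXAMPLE(vdev); /* hypothetical */
        bool should_start = virtio_device_should_start(vdev, status);

        if (vhost_dev_is_started(&s->vhost_dev) == should_start) {
            return;
        }
        if (should_start) {
            vu_example_start(vdev);
        } else {
            vu_example_stop(vdev);
        }
    }
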
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index e4878b967f..80c579164f 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -1488,7 +1488,8 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
if (!(s->flags & HF_AVX_EN_MASK)) {
goto illegal;
}
- } else {
+ } else if (e->special != X86_SPECIAL_MMX ||
+ (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
if (!(s->flags & HF_OSFXSR_MASK)) {
goto illegal;
}
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 405a5d414a..55bd1194d3 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -71,10 +71,11 @@ static bool ptw_translate(PTETranslate *inout, hwaddr addr)
TranslateFault *err = inout->err;
assert(inout->ptw_idx == MMU_NESTED_IDX);
- err->exception_index = 0; /* unused */
- err->error_code = inout->env->error_code;
- err->cr2 = addr;
- err->stage2 = S2_GPT;
+ *err = (TranslateFault){
+ .error_code = inout->env->error_code,
+ .cr2 = addr,
+ .stage2 = S2_GPT,
+ };
return false;
}
return true;
@@ -431,10 +432,11 @@ do_check_protect_pse36:
MMU_NESTED_IDX, true,
&pte_trans.haddr, &full, 0);
if (unlikely(flags & TLB_INVALID_MASK)) {
- err->exception_index = 0; /* unused */
- err->error_code = env->error_code;
- err->cr2 = paddr;
- err->stage2 = S2_GPA;
+ *err = (TranslateFault){
+ .error_code = env->error_code,
+ .cr2 = paddr,
+ .stage2 = S2_GPA,
+ };
return false;
}
@@ -494,10 +496,11 @@ do_check_protect_pse36:
}
break;
}
- err->exception_index = EXCP0E_PAGE;
- err->error_code = error_code;
- err->cr2 = addr;
- err->stage2 = S2_NONE;
+ *err = (TranslateFault){
+ .exception_index = EXCP0E_PAGE,
+ .error_code = error_code,
+ .cr2 = addr,
+ };
return false;
}
@@ -564,9 +567,10 @@ static bool get_physical_address(CPUX86State *env, vaddr addr,
int shift = in.pg_mode & PG_MODE_LA57 ? 56 : 47;
int64_t sext = (int64_t)addr >> shift;
if (sext != 0 && sext != -1) {
- err->exception_index = EXCP0D_GPF;
- err->error_code = 0;
- err->cr2 = addr;
+ *err = (TranslateFault){
+ .exception_index = EXCP0D_GPF,
+ .cr2 = addr,
+ };
return false;
}
}
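
The excp_helper.c hunks rely on a C guarantee: members omitted from a designated initializer are zero-initialized, which is why the removed err->exception_index = 0; /* unused */ assignments become redundant once the whole struct is assigned from a compound literal. A standalone demo; struct Fault stands in for TranslateFault:

    #include <assert.h>

    struct Fault {
        int exception_index;
        int error_code;
        unsigned long cr2;
    };

    int main(void)
    {
        struct Fault f;

        /* Members not named in the compound literal become zero. */
        f = (struct Fault){ .error_code = 7, .cr2 = 0x1000 };
        assert(f.exception_index == 0);
        return 0;
    }
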
diff --git a/tests/qtest/libqos/virtio-gpio.c b/tests/qtest/libqos/virtio-gpio.c
index 762aa6695b..f22d7b5eb5 100644
--- a/tests/qtest/libqos/virtio-gpio.c
+++ b/tests/qtest/libqos/virtio-gpio.c
@@ -154,7 +154,8 @@ static void virtio_gpio_register_nodes(void)
QOSGraphEdgeOptions edge_opts = { };
/* vhost-user-gpio-device */
- edge_opts.extra_device_opts = "id=gpio0,chardev=chr-vhost-user-test";
+ edge_opts.extra_device_opts = "id=gpio0,chardev=chr-vhost-user-test "
+ "-global virtio-mmio.force-legacy=false";
qos_node_create_driver("vhost-user-gpio-device",
virtio_gpio_device_create);
qos_node_consumes("vhost-user-gpio-device", "virtio-bus", &edge_opts);
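
The qtest tweak works because extra_device_opts is pasted verbatim after the -device name, so embedding a space smuggles an extra top-level option onto the command line. Assuming that expansion, the test ends up invoking QEMU with roughly:

    -device vhost-user-gpio-device,id=gpio0,chardev=chr-vhost-user-test -global virtio-mmio.force-legacy=false

which forces the virtio-mmio transport into non-legacy mode for the gpio device.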