Diffstat (limited to 'hw/virtio')
 hw/virtio/vhost-iova-tree.c        |  2
 hw/virtio/vhost-iova-tree.h        |  2
 hw/virtio/vhost-shadow-virtqueue.c | 31
 hw/virtio/vhost-vdpa.c             | 90
 4 files changed, 56 insertions(+), 69 deletions(-)
diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c
index 67bf6d57ab..3d03395a77 100644
--- a/hw/virtio/vhost-iova-tree.c
+++ b/hw/virtio/vhost-iova-tree.c
@@ -104,7 +104,7 @@ int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map)
* @iova_tree: The vhost iova tree
* @map: The map to remove
*/
-void vhost_iova_tree_remove(VhostIOVATree *iova_tree, const DMAMap *map)
+void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map)
{
iova_tree_remove(iova_tree->iova_taddr_map, map);
}
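The by-value DMAMap above matters because iova_tree_remove() ends up freeing the tree's own copy of the entry. A caller holding the const DMAMap * returned by vhost_iova_tree_find_iova() would otherwise hand remove() a pointer into the very allocation being freed. Copying the key at the call boundary avoids that; a minimal caller sketch (the lookup side is illustrative):

    const DMAMap *result = vhost_iova_tree_find_iova(iova_tree, &needle);
    if (result) {
        /*
         * *result is copied into the argument before the backing tree
         * entry is freed, so there is no use-after-free.
         */
        vhost_iova_tree_remove(iova_tree, *result);
    }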
diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h
index 6a4f24e0f9..4adfd79ff0 100644
--- a/hw/virtio/vhost-iova-tree.h
+++ b/hw/virtio/vhost-iova-tree.h
@@ -22,6 +22,6 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete);
const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree,
const DMAMap *map);
int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map);
-void vhost_iova_tree_remove(VhostIOVATree *iova_tree, const DMAMap *map);
+void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map);
#endif
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index e4956728dd..e8e5bbc368 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -233,9 +233,6 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
/**
* Add an element to a SVQ.
*
- * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure not ENOSPC, it is free.
- *
* Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
*/
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
@@ -252,7 +249,6 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
if (unlikely(!ok)) {
- g_free(elem);
return -EINVAL;
}
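With the g_free() gone, vhost_svq_add() no longer takes ownership of the element on failure: the caller decides whether to free it or keep it for a retry (the guest-kick handler below requeues it on -ENOSPC). A hedged sketch of the resulting caller contract, assuming the usual signature taking the sg lists plus the element:

    VirtQueueElement *elem = virtqueue_pop(vq, sizeof(*elem));
    int r = vhost_svq_add(svq, elem->out_sg, elem->out_num,
                          elem->in_sg, elem->in_num, elem);
    if (unlikely(r != 0)) {
        /*
         * elem is still ours: free it, or stash it to retry later
         * when r == -ENOSPC.
         */
        g_free(elem);
    }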
@@ -293,7 +289,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
virtio_queue_set_notification(svq->vq, false);
while (true) {
- VirtQueueElement *elem;
+ g_autofree VirtQueueElement *elem;
int r;
if (svq->next_guest_avail_elem) {
@@ -324,12 +320,14 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
* queue the current guest descriptor and ignore kicks
* until some elements are used.
*/
- svq->next_guest_avail_elem = elem;
+ svq->next_guest_avail_elem = g_steal_pointer(&elem);
}
/* VQ is full or broken, just return and ignore kicks */
return;
}
+ /* elem belongs to SVQ or external caller now */
+ elem = NULL;
}
virtio_queue_set_notification(svq->vq, true);
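The g_autofree conversion is GLib's scope-exit idiom: the element is freed automatically when it goes out of scope unless ownership is moved out first, either with g_steal_pointer() (the requeue path, which returns the pointer and nulls the local) or by assigning NULL once vhost_svq_add() has accepted it. A self-contained sketch of the idiom with hypothetical names:

    #include <glib.h>

    static void take_ownership(gpointer p)
    {
        /* the consumer is now responsible for releasing p */
        g_free(p);
    }

    static void example(gboolean hand_off)
    {
        g_autofree char *buf = g_malloc0(64); /* freed at scope exit... */

        if (hand_off) {
            /*
             * ...unless ownership leaves the scope: g_steal_pointer()
             * nulls the local, so the automatic g_free() is a no-op.
             */
            take_ownership(g_steal_pointer(&buf));
        }
    }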
@@ -416,7 +414,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
return NULL;
}
- if (unlikely(!svq->desc_state[used_elem.id].elem)) {
+ if (unlikely(!svq->desc_state[used_elem.id].ndescs)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Device %s says index %u is used, but it was not available",
svq->vdev->name, used_elem.id);
@@ -424,6 +422,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
}
num = svq->desc_state[used_elem.id].ndescs;
+ svq->desc_state[used_elem.id].ndescs = 0;
last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
svq->desc_next[last_used_chain] = svq->free_head;
svq->free_head = used_elem.id;
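Keying the in-flight check on .ndescs instead of .elem means validation no longer requires a VirtQueueElement to be attached, so descriptor chains injected directly through vhost_svq_add() without a guest element still pass; zeroing ndescs when the id is reclaimed also stops a stale used id from being accepted twice. The per-descriptor state this relies on, as inferred from the fields used here:

    typedef struct SVQDescState {
        VirtQueueElement *elem;  /* may be NULL for element-less chains */
        /*
         * Number of descriptors in the chain: non-zero exactly while
         * the head id is exposed to the device.
         */
        unsigned int ndescs;
    } SVQDescState;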
@@ -500,20 +499,20 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
{
int64_t start_us = g_get_monotonic_time();
+ uint32_t len;
+
do {
- uint32_t len;
- VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
- if (elem) {
- return len;
+ if (vhost_svq_more_used(svq)) {
+ break;
}
if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
return 0;
}
-
- /* Make sure we read new used_idx */
- smp_rmb();
} while (true);
+
+ vhost_svq_get_buf(svq, &len);
+ return len;
}
/**
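The restructured vhost_svq_poll() separates waiting from dequeuing: it spins until vhost_svq_more_used() reports a used buffer (presumably that helper, together with vhost_svq_get_buf(), now owns the used-index re-read and its ordering, which is why the explicit smp_rmb() could go), gives up after 10e6 us, i.e. ten seconds, and only then fetches the single buffer. The same loop with the exit condition up front:

    size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
    {
        int64_t start_us = g_get_monotonic_time();
        uint32_t len;

        while (!vhost_svq_more_used(svq)) {
            if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
                return 0; /* timed out waiting for the device */
            }
        }

        vhost_svq_get_buf(svq, &len); /* fills len for the used chain */
        return len;
    }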
@@ -602,13 +601,13 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
event_notifier_set_handler(svq_kick, NULL);
}
+ event_notifier_init_fd(svq_kick, svq_kick_fd);
/*
* event_notifier_set_handler already checks for guest's notifications if
* they arrive at the new file descriptor in the switch, so there is no
* need to explicitly check for them.
*/
if (poll_start) {
- event_notifier_init_fd(svq_kick, svq_kick_fd);
event_notifier_set(svq_kick);
event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
}
@@ -655,7 +654,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
*/
void vhost_svq_stop(VhostShadowVirtqueue *svq)
{
- event_notifier_set_handler(&svq->svq_kick, NULL);
+ vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND);
g_autofree VirtQueueElement *next_avail_elem = NULL;
if (!svq->vq) {
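Hoisting event_notifier_init_fd() out of the poll_start branch records the new fd even when it will not be polled, which is what lets vhost_svq_stop() reuse the helper. Assuming poll_start is simply a check that the fd is not VHOST_FILE_UNBIND, the stop path becomes:

    /*
     * VHOST_FILE_UNBIND is not a pollable fd: inside the helper,
     * poll_start evaluates false, the old handler is removed, and the
     * unbind value is stored. That is the old
     * event_notifier_set_handler(&svq->svq_kick, NULL), plus fd
     * bookkeeping that a later restart can rely on.
     */
    vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND);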
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3ff9ce3501..7468e44b87 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -176,6 +176,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
+ DMAMap mem_region = {};
struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
hwaddr iova;
Int128 llend, llsize;
@@ -212,13 +213,13 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
if (v->shadow_vqs_enabled) {
- DMAMap mem_region = {
- .translated_addr = (hwaddr)(uintptr_t)vaddr,
- .size = int128_get64(llsize) - 1,
- .perm = IOMMU_ACCESS_FLAG(true, section->readonly),
- };
+ int r;
- int r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
+ mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
+ mem_region.size = int128_get64(llsize) - 1,
+ mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),
+
+ r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
if (unlikely(r != IOVA_OK)) {
error_report("Can't allocate a mapping (%d)", r);
goto fail;
@@ -232,11 +233,16 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
if (ret) {
error_report("vhost vdpa map fail!");
- goto fail;
+ goto fail_map;
}
return;
+fail_map:
+ if (v->shadow_vqs_enabled) {
+ vhost_iova_tree_remove(v->iova_tree, mem_region);
+ }
+
fail:
/*
* On the initfn path, store the first error in the container so we
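Together with the mem_region declaration hoisted to function scope, the fail_map label gives region_add conventional reverse-order unwinding: the iova allocation is the only resource held when the DMA map fails, and it is released before falling through to the generic fail path instead of lingering in the tree as a mapping the device never received. The skeleton of the flow after this change:

    if (v->shadow_vqs_enabled) {
        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region); /* (1) */
        if (unlikely(r != IOVA_OK)) {
            goto fail;                    /* nothing acquired yet */
        }
        iova = mem_region.iova;
    }
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);           /* (2) */
    if (ret) {
        goto fail_map;                    /* undo (1) before failing */
    }
    return;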
@@ -289,8 +295,12 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
};
result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
+ if (!result) {
+ /* The memory listener map wasn't mapped */
+ return;
+ }
iova = result->iova;
- vhost_iova_tree_remove(v->iova_tree, result);
+ vhost_iova_tree_remove(v->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(v);
ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
@@ -874,41 +884,41 @@ static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
/**
* Unmap a SVQ area in the device
*/
-static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
- const DMAMap *needle)
+static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
- const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, needle);
+ const DMAMap needle = {
+ .translated_addr = addr,
+ };
+ const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
hwaddr size;
int r;
if (unlikely(!result)) {
error_report("Unable to find SVQ address to unmap");
- return false;
+ return;
}
size = ROUND_UP(result->size, qemu_real_host_page_size());
r = vhost_vdpa_dma_unmap(v, result->iova, size);
- return r == 0;
+ if (unlikely(r < 0)) {
+ error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
+ return;
+ }
+
+ vhost_iova_tree_remove(v->iova_tree, *result);
}
-static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
const VhostShadowVirtqueue *svq)
{
- DMAMap needle = {};
struct vhost_vdpa *v = dev->opaque;
struct vhost_vring_addr svq_addr;
- bool ok;
vhost_svq_get_vring_addr(svq, &svq_addr);
- needle.translated_addr = svq_addr.desc_user_addr;
- ok = vhost_vdpa_svq_unmap_ring(v, &needle);
- if (unlikely(!ok)) {
- return false;
- }
+ vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
- needle.translated_addr = svq_addr.used_user_addr;
- return vhost_vdpa_svq_unmap_ring(v, &needle);
+ vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}
/**
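Two details in the rewritten unmap helper are worth calling out. First, the needle sets only translated_addr, so the lookup resolves whichever mapping contains that host address. Second, the iova tree entry is removed only after vhost_vdpa_dma_unmap() succeeds, so on failure the tree still mirrors what the device actually has mapped. And only two addresses are unmapped per queue because, judging by the map_rings counterpart below, SVQ backs each vring with exactly two host mappings:

    vhost_svq_get_vring_addr(svq, &svq_addr);
    /* driver area: descriptor table + avail ring */
    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
    /* device area: used ring */
    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);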
@@ -934,7 +944,7 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
needle->perm == IOMMU_RO);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Cannot map region to device");
- vhost_iova_tree_remove(v->iova_tree, needle);
+ vhost_iova_tree_remove(v->iova_tree, *needle);
}
return r == 0;
@@ -986,7 +996,7 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
if (unlikely(!ok)) {
error_prepend(errp, "Cannot create vq device region: ");
- vhost_vdpa_svq_unmap_ring(v, &driver_region);
+ vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
}
addr->used_user_addr = device_region.iova;
@@ -1023,13 +1033,6 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
return true;
}
- if (v->migration_blocker) {
- int r = migrate_add_blocker(v->migration_blocker, &err);
- if (unlikely(r < 0)) {
- return false;
- }
- }
-
for (i = 0; i < v->shadow_vqs->len; ++i) {
VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
@@ -1072,33 +1075,21 @@ err:
vhost_svq_stop(svq);
}
- if (v->migration_blocker) {
- migrate_del_blocker(v->migration_blocker);
- }
-
return false;
}
-static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
+static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
struct vhost_vdpa *v = dev->opaque;
if (!v->shadow_vqs) {
- return true;
+ return;
}
for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
- bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
- if (unlikely(!ok)) {
- return false;
- }
- }
-
- if (v->migration_blocker) {
- migrate_del_blocker(v->migration_blocker);
+ vhost_vdpa_svq_unmap_rings(dev, svq);
}
- return true;
}
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
@@ -1115,10 +1106,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
}
vhost_vdpa_set_vring_ready(dev);
} else {
- ok = vhost_vdpa_svqs_stop(dev);
- if (unlikely(!ok)) {
- return -1;
- }
+ vhost_vdpa_svqs_stop(dev);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}