author    Peter Maydell <peter.maydell@linaro.org>  2020-02-27 20:15:14 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2020-02-27 20:15:15 +0100
commit    8b6b68e05b43f976714ca1d2afe01a64e1d82cba (patch)
tree      aac13ac1b724dff36a5c4d7623b70f93dbc78d02 /hw/virtio/virtio.c
parent    Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2020-02-26' into st... (diff)
parent    Fixed assert in vhost_user_set_mem_table_postcopy (diff)
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
virtio, pc: fixes, features

New virtio iommu.
Unrealize memory leaks.
In-band kick/call support.
Bugfixes, documentation all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 27 Feb 2020 08:46:33 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (30 commits)
  Fixed assert in vhost_user_set_mem_table_postcopy
  vhost-user: only set slave channel for first vq
  acpi: cpuhp: document CPHP_GET_CPU_ID_CMD command
  libvhost-user: implement in-band notifications
  docs: vhost-user: add in-band kick/call messages
  libvhost-user: handle NOFD flag in call/kick/err better
  libvhost-user-glib: use g_main_context_get_thread_default()
  libvhost-user-glib: fix VugDev main fd cleanup
  libvhost-user: implement VHOST_USER_PROTOCOL_F_REPLY_ACK
  MAINTAINERS: add virtio-iommu related files
  hw/arm/virt: Add the virtio-iommu device tree mappings
  virtio-iommu-pci: Add virtio iommu pci support
  virtio-iommu: Support migration
  virtio-iommu: Implement fault reporting
  virtio-iommu: Implement translate
  virtio-iommu: Implement map/unmap
  virtio-iommu: Implement attach/detach command
  virtio-iommu: Decode the command payload
  virtio-iommu: Add skeleton
  virtio: gracefully handle invalid region caches
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
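Of the commits above, the one that changes this file is "virtio: gracefully
handle invalid region caches": vring_get_region_caches() stops asserting and
may now return NULL, so every caller has to tolerate a missing cache. A
minimal sketch of the resulting pattern, with names taken from the diff below
and the bodies trimmed to the essentials:

/* Called within rcu_read_lock(). May now return NULL instead of
 * asserting when the region caches are unavailable. */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return atomic_rcu_read(&vq->vring.caches);
}

/* Readers fall back to a neutral value when the caches are gone... */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);

    if (!caches) {
        return 0; /* no cache: report "no flags set" */
    }
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail,
                                   offsetof(VRingAvail, flags));
}

/* ...while writers skip the guest-memory access but still update any
 * shadow state kept in the VirtQueue itself. */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }
    vq->used_idx = val;
}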
Diffstat (limited to 'hw/virtio/virtio.c')
-rw-r--r--  hw/virtio/virtio.c | 99
1 file changed, 91 insertions(+), 8 deletions(-)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 9d06dbe3ef..b2d415e5dd 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -282,15 +282,19 @@ static void vring_packed_flags_write(VirtIODevice *vdev,
/* Called within rcu_read_lock(). */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
- assert(caches != NULL);
- return caches;
+ return atomic_rcu_read(&vq->vring.caches);
}
+
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, flags);
+
+ if (!caches) {
+ return 0;
+ }
+
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
@@ -299,6 +303,11 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, idx);
+
+ if (!caches) {
+ return 0;
+ }
+
vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
}
@@ -308,6 +317,11 @@ static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, ring[i]);
+
+ if (!caches) {
+ return 0;
+ }
+
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
@@ -323,6 +337,11 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, ring[i]);
+
+ if (!caches) {
+ return;
+ }
+
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
@@ -334,6 +353,11 @@ static uint16_t vring_used_idx(VirtQueue *vq)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
+
+ if (!caches) {
+ return 0;
+ }
+
return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
@@ -342,8 +366,12 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
- virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
- address_space_cache_invalidate(&caches->used, pa, sizeof(val));
+
+ if (caches) {
+ virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(val));
+ }
+
vq->used_idx = val;
}
@@ -353,8 +381,13 @@ static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
- uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+ uint16_t flags;
+ if (!caches) {
+ return;
+ }
+
+ flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
@@ -365,8 +398,13 @@ static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
- uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+ uint16_t flags;
+ if (!caches) {
+ return;
+ }
+
+ flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
@@ -381,6 +419,10 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
}
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return;
+ }
+
pa = offsetof(VRingUsed, ring[vq->vring.num]);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
@@ -410,7 +452,11 @@ static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
VRingMemoryRegionCaches *caches;
RCU_READ_LOCK_GUARD();
- caches  = vring_get_region_caches(vq);
+ caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return;
+ }
+
vring_packed_event_read(vq->vdev, &caches->used, &e);
if (!enable) {
@@ -597,6 +643,10 @@ static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
}
cache = vring_get_region_caches(vq);
+ if (!cache) {
+ return 1;
+ }
+
vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
vq->last_avail_idx);
@@ -777,6 +827,10 @@ static void virtqueue_packed_fill_desc(VirtQueue *vq,
}
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return;
+ }
+
vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}
@@ -949,6 +1003,10 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
max = vq->vring.num;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ goto err;
+ }
+
while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
MemoryRegionCache *desc_cache = &caches->desc;
unsigned int num_bufs;
@@ -1089,6 +1147,9 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
max = vq->vring.num;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ goto err;
+ }
for (;;) {
unsigned int num_bufs = total_bufs;
@@ -1194,6 +1255,10 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
}
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ goto err;
+ }
+
desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
sizeof(VRingPackedDesc) : sizeof(VRingDesc);
if (caches->desc.len < vq->vring.num * desc_size) {
@@ -1388,6 +1453,11 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
i = head;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ virtio_error(vdev, "Region caches not initialized");
+ goto done;
+ }
+
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
@@ -1510,6 +1580,11 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
i = vq->last_avail_idx;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ virtio_error(vdev, "Region caches not initialized");
+ goto done;
+ }
+
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
@@ -1629,6 +1704,10 @@ static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
VRingPackedDesc desc;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return 0;
+ }
+
desc_cache = &caches->desc;
virtio_queue_set_notification(vq, 0);
@@ -2413,6 +2492,10 @@ static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
VRingMemoryRegionCaches *caches;
caches = vring_get_region_caches(vq);
+ if (!caches) {
+ return false;
+ }
+
vring_packed_event_read(vdev, &caches->avail, &e);
old = vq->signalled_used;
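
For context (not part of this diff): the NULL that all of these checks guard
against comes from the cache teardown path elsewhere in hw/virtio/virtio.c,
which unpublishes the caches under RCU before freeing them, so a concurrent
reader inside rcu_read_lock() sees either the old caches (valid until the
grace period ends) or NULL. Roughly how that teardown looks, reconstructed
from the surrounding file rather than quoted from it, so treat the exact
body as an assumption:

/* Unpublish the caches with an RCU write; readers racing with this
 * either keep using the old caches or observe NULL, which is the
 * case the new checks in this diff handle. */
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        /* Deferred free: runs only after all current readers exit. */
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}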