author     David Hildenbrand   2021-04-13 11:55:26 +0200
committer  Eduardo Habkost     2021-07-08 21:54:45 +0200
commit     a74317f636eb3352210fff5c58896ddc1e5aabdf (patch)
tree       39f2d3af9eda9e6645afe4bdbcc1178cbc8c80f3 /hw/vfio
parent     vfio: Query and store the maximum number of possible DMA mappings (diff)
vfio: Sanity check maximum number of DMA mappings with RamDiscardManager
Although RamDiscardManager can handle running into the maximum number of
DMA mappings by propagating errors when creating a DMA mapping, we want to
sanity check and warn the user early that there is a theoretical setup
issue and that virtio-mem might not be able to provide as much memory
towards a VM as desired.

As suggested by Alex, let's use the number of KVM memory slots to guess
how many other mappings we might see over time.

Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Auger Eric <eric.auger@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: teawater <teawaterz@linux.alibaba.com>
Cc: Marek Kedzierski <mkedzier@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210413095531.25603-9-david@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
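To get a rough feel for what the heuristic in this patch implies, the standalone
sketch below (not part of the patch; the device size, block sizes, and limits are
assumed example values, using 65535 DMA entries as a commonly seen VFIO type1
default and the 512-memslot fallback the patch uses when KVM is not queried)
computes the per-device mapping estimate the same way the patch does, i.e. the
aligned device span divided by its granularity, and applies the warning condition:

/* Standalone illustration of the heuristic; all example values are assumptions. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MiB (1024ULL * 1024)
#define GiB (1024ULL * MiB)

int main(void)
{
    /* Assumed limits: 65535 DMA entries and the 512-memslot fallback. */
    const uint64_t dma_max_mappings = 65535;
    const uint64_t max_memslots = 512;

    /* Hypothetical virtio-mem device spanning 1 TiB, tried with two block sizes. */
    const uint64_t size = 1024 * GiB;
    const uint64_t block_sizes[] = { 2 * MiB, 128 * MiB };

    for (int i = 0; i < 2; i++) {
        /* Aligned span divided by granularity, as in the patch. */
        uint64_t vrdl_mappings = size / block_sizes[i];
        uint64_t vrdl_count = 1; /* one RamDiscardManager section */
        int warns = vrdl_mappings + max_memslots - vrdl_count > dma_max_mappings;

        printf("block-size %4" PRIu64 " MiB -> %7" PRIu64 " mappings -> %s\n",
               block_sizes[i] / MiB, vrdl_mappings,
               warns ? "would warn" : "fits");
    }
    return 0;
}

With these assumed numbers, a 2 MiB block size alone accounts for 524288 potential
mappings and would trigger the warning, while 128 MiB yields 8192 and stays well
under the limit, which is why the warning message suggests increasing virtio-mem's
'block-size'.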
Diffstat (limited to 'hw/vfio')
-rw-r--r--  hw/vfio/common.c  43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 79628d60ae..f8a2fe8441 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -728,6 +728,49 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container,
vfio_ram_discard_notify_discard, true);
ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
+
+ /*
+ * Sanity-check if we have a theoretically problematic setup where we could
+ * exceed the maximum number of possible DMA mappings over time. We assume
+ * that each mapped section in the same address space as a RamDiscardManager
+ * section consumes exactly one DMA mapping, with the exception of
+ * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections
+ * in the same address space as RamDiscardManager sections.
+ *
+ * We assume that each section in the address space consumes one memslot.
+ * We take the number of KVM memory slots as a best guess for the maximum
+ * number of sections in the address space we could have over time,
+ * also consuming DMA mappings.
+ */
+ if (container->dma_max_mappings) {
+ unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
+
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ max_memslots = kvm_get_max_memslots();
+ }
+#endif
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ hwaddr start, end;
+
+ start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
+ vrdl->granularity);
+ end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size,
+ vrdl->granularity);
+ vrdl_mappings += (end - start) / vrdl->granularity;
+ vrdl_count++;
+ }
+
+ if (vrdl_mappings + max_memslots - vrdl_count >
+ container->dma_max_mappings) {
+ warn_report("%s: possibly running out of DMA mappings. E.g., try"
+ " increasing the 'block-size' of virtio-mem devies."
+ " Maximum possible DMA mappings: %d, Maximum possible"
+ " memslots: %d", __func__, container->dma_max_mappings,
+ max_memslots);
+ }
+ }
}
static void vfio_unregister_ram_discard_listener(VFIOContainer *container,