author      Cindy Lu                          2022-10-31 04:10:19 +0100
committer   Michael S. Tsirkin                2022-11-07 20:08:17 +0100
commit      baa44bce87fe53ef5c95d39e634b3bace014d235 (patch)
tree        4d88102332e623600399a3e295f2141d1a1d563b /softmmu
parent      tests: virt: Update expected *.acpihmatvirt tables (diff)
vfio: move implementation of vfio_get_xlat_addr() to memory.c
- Move the implementation of vfio_get_xlat_addr() to softmmu/memory.c and rename it to memory_get_xlat_addr(), so the function can be reused by other devices, such as vDPA devices.
- Add a new vfio_get_xlat_addr() in vfio/common.c that checks whether the memory is backed by a RAM discard manager, so the device can emit its own warning.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20221031031020.1405111-2-lulu@redhat.com>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
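As a rough illustration of the vfio/common.c half of the change (which falls outside the 'softmmu' diffstat below), the new wrapper might look roughly like the sketch that follows. The body and the warning text are assumptions based on the commit message, not the committed code; warn_report_once() is QEMU's existing helper from qemu/error-report.h, and the sketch assumes the new memory_get_xlat_addr() prototype is exported from exec/memory.h.

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "exec/memory.h"

/*
 * Illustrative sketch only: forward the translation to the generic
 * memory_get_xlat_addr() helper and let vfio emit its own warning when
 * the region is backed by a RamDiscardManager (e.g. virtio-mem), since
 * vfio pins all pages of a mapping.
 */
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                               ram_addr_t *ram_addr, bool *read_only)
{
    bool mr_has_discard_manager = false;
    bool ok;

    ok = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
                              &mr_has_discard_manager);
    if (ok && mr_has_discard_manager) {
        /* Wording is illustrative; the real warning lives in vfio/common.c. */
        warn_report_once("vfio: IOMMU mapping of memory backed by a"
                         " RamDiscardManager; all pages will be pinned");
    }
    return ok;
}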
Diffstat (limited to 'softmmu')
-rw-r--r--   softmmu/memory.c   72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/softmmu/memory.c b/softmmu/memory.c
index 7ba2048836..bc0be3f62c 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -33,6 +33,7 @@
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
+#include "exec/address-spaces.h"
//#define DEBUG_UNASSIGNED
@@ -2121,6 +2122,77 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
rdmc->unregister_listener(rdm, rdl);
}
+/* Called with rcu_read_lock held. */
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+                          ram_addr_t *ram_addr, bool *read_only,
+                          bool *mr_has_discard_manager)
+{
+    MemoryRegion *mr;
+    hwaddr xlat;
+    hwaddr len = iotlb->addr_mask + 1;
+    bool writable = iotlb->perm & IOMMU_WO;
+
+    if (mr_has_discard_manager) {
+        *mr_has_discard_manager = false;
+    }
+    /*
+     * The IOMMU TLB entry we have just covers translation through
+     * this IOMMU to its immediate target. We need to translate
+     * it the rest of the way through to memory.
+     */
+    mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
+                                 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
+    if (!memory_region_is_ram(mr)) {
+        error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
+        return false;
+    } else if (memory_region_has_ram_discard_manager(mr)) {
+        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
+        MemoryRegionSection tmp = {
+            .mr = mr,
+            .offset_within_region = xlat,
+            .size = int128_make64(len),
+        };
+        if (mr_has_discard_manager) {
+            *mr_has_discard_manager = true;
+        }
+        /*
+         * Malicious VMs can map memory into the IOMMU, which is expected
+         * to remain discarded. vfio will pin all pages, populating memory.
+         * Disallow that. vmstate priorities make sure any RamDiscardManager
+         * were already restored before IOMMUs are restored.
+         */
+        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
+            error_report("iommu map to discarded memory (e.g., unplugged via"
+                         " virtio-mem): %" HWADDR_PRIx "",
+                         iotlb->translated_addr);
+            return false;
+        }
+    }
+
+    /*
+     * Translation truncates length to the IOMMU page size,
+     * check that it did not truncate too much.
+     */
+    if (len & iotlb->addr_mask) {
+        error_report("iommu has granularity incompatible with target AS");
+        return false;
+    }
+
+    if (vaddr) {
+        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+    }
+
+    if (ram_addr) {
+        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
+    }
+
+    if (read_only) {
+        *read_only = !writable || mr->readonly;
+    }
+
+    return true;
+}
+
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
uint8_t mask = 1 << client;
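
For context, a hypothetical caller of the new helper, for instance an IOMMU notifier in a vDPA backend as the commit message suggests, could use it roughly as sketched below. example_iommu_map() and the commented-out program_device_mapping() are illustrative placeholders, not QEMU APIs; memory_get_xlat_addr() must be called under the RCU read lock, as the comment in the diff notes.

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "exec/memory.h"

/*
 * Hypothetical notifier body: resolve a guest IOVA mapping to a host
 * virtual address before programming it into the device.
 */
static void example_iommu_map(IOMMUTLBEntry *iotlb)
{
    void *vaddr = NULL;
    bool read_only = false;

    RCU_READ_LOCK_GUARD();
    /* NULL is allowed for any output the caller does not need. */
    if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
        return; /* not RAM, discarded, or incompatible IOMMU granularity */
    }

    /*
     * Placeholder for backend-specific code, e.g.:
     * program_device_mapping(iotlb->iova, vaddr,
     *                        iotlb->addr_mask + 1, read_only);
     */
}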