author     Peter Maydell    2020-11-02 10:54:00 +0100
committer  Peter Maydell    2020-11-02 10:54:00 +0100
commit     2c6605389c1f76973d92b69b85d40d94b8f1092c (patch)
tree       1f9e9b94e9884f26230db5b9cebac492aa8f1ec1 /hw/vfio
parent     Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2020-10-27-v2' into... (diff)
parent     vfio: fix incorrect print type (diff)
Merge remote-tracking branch 'remotes/awilliam/tags/vfio-update-20201101.0' into staging
VFIO update 2020-11-01
* Migration support (Kirti Wankhede)
* s390 DMA limiting (Matthew Rosato)
* zPCI hardware info (Matthew Rosato)
* Lock guard (Amey Narkhede)
* Print fixes (Zhengui li)
* Warning/build fixes
# gpg: Signature made Sun 01 Nov 2020 20:38:10 GMT
# gpg: using RSA key 239B9B6E3BB08B22
# gpg: Good signature from "Alex Williamson <alex.williamson@redhat.com>" [full]
# gpg: aka "Alex Williamson <alex@shazbot.org>" [full]
# gpg: aka "Alex Williamson <alwillia@redhat.com>" [full]
# gpg: aka "Alex Williamson <alex.l.williamson@gmail.com>" [full]
# Primary key fingerprint: 42F6 C04E 540B D1A9 9E7B 8A90 239B 9B6E 3BB0 8B22
* remotes/awilliam/tags/vfio-update-20201101.0: (32 commits)
vfio: fix incorrect print type
hw/vfio: Use lock guard macros
s390x/pci: get zPCI function info from host
vfio: Add routine for finding VFIO_DEVICE_GET_INFO capabilities
s390x/pci: use a PCI Function structure
s390x/pci: clean up s390 PCI groups
s390x/pci: use a PCI Group structure
s390x/pci: create a header dedicated to PCI CLP
s390x/pci: Honor DMA limits set by vfio
s390x/pci: Add routine to get the vfio dma available count
vfio: Find DMA available capability
vfio: Create shared routine for scanning info capabilities
s390x/pci: Move header files to include/hw/s390x
linux-headers: update against 5.10-rc1
update-linux-headers: Add vfio_zdev.h
qapi: Add VFIO devices migration stats in Migration stats
vfio: Make vfio-pci device migration capable
vfio: Add ioctl to get dirty pages bitmap during dma unmap
vfio: Dirty page tracking when vIOMMU is enabled
vfio: Add vfio_listener_log_sync to mark dirty pages
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
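
Most of this pull is the new VFIO migration stream format added in hw/vfio/migration.c (full patch below). Each device's state is framed in the stream with 64-bit delimiters, composed of an all-ones high word, the 0xef10 "emulated function IO" magic, and a 16-bit flag. The following compilable sketch shows how a stream reader dispatches on those markers, mirroring vfio_load_state() in the diff; the flag values are taken from the patch, but section_name() and the sample stream are illustrative, not QEMU API:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Values from the patch: 0xffffffff (high 32 bits all 1s) | 0xef10
 * (magic for emulated-function IO) | 16-bit flag. */
#define VFIO_MIG_FLAG_END_OF_STATE     0xffffffffef100001ULL
#define VFIO_MIG_FLAG_DEV_CONFIG_STATE 0xffffffffef100002ULL
#define VFIO_MIG_FLAG_DEV_SETUP_STATE  0xffffffffef100003ULL
#define VFIO_MIG_FLAG_DEV_DATA_STATE   0xffffffffef100004ULL

/* Illustrative dispatcher: a loader reads a be64 marker, handles the
 * section it introduces, and repeats until END_OF_STATE. */
static const char *section_name(uint64_t marker)
{
    switch (marker) {
    case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
        return "config";   /* device config space follows */
    case VFIO_MIG_FLAG_DEV_SETUP_STATE:
        return "setup";    /* immediately followed by END_OF_STATE */
    case VFIO_MIG_FLAG_DEV_DATA_STATE:
        return "data";     /* be64 data_size, then data_size bytes */
    case VFIO_MIG_FLAG_END_OF_STATE:
        return "end";
    default:
        return "unknown";  /* vfio_load_state() fails with -EINVAL */
    }
}

int main(void)
{
    uint64_t stream[] = { VFIO_MIG_FLAG_DEV_SETUP_STATE,
                          VFIO_MIG_FLAG_END_OF_STATE };

    for (size_t i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
        printf("marker 0x%llx -> %s section\n",
               (unsigned long long)stream[i], section_name(stream[i]));
    }
    return 0;
}
```

The unknown-tag default matters: because every delimiter has the same recognizable prefix, a corrupted or misordered stream fails fast instead of being misparsed as device data.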
Diffstat (limited to 'hw/vfio')
-rw-r--r--  hw/vfio/common.c     |  508
-rw-r--r--  hw/vfio/meson.build  |    1
-rw-r--r--  hw/vfio/migration.c  |  933
-rw-r--r--  hw/vfio/pci.c        |   87
-rw-r--r--  hw/vfio/pci.h        |    1
-rw-r--r--  hw/vfio/platform.c   |    7
-rw-r--r--  hw/vfio/trace-events |   21
7 files changed, 1500 insertions(+), 58 deletions(-)
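
Both new dirty-tracking paths in common.c below, vfio_dma_unmap_bitmap() and vfio_get_dirty_bitmap(), size their bitmap buffers the same way: one bit per TARGET_PAGE_SIZE page, padded to a whole number of __u64 words so the kernel can operate on aligned words. A standalone sketch of that arithmetic; the 4 KiB target page and the macro spellings here are assumptions for illustration, not QEMU's definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS  12                       /* assumed 4 KiB page */
#define TARGET_PAGE_SIZE  (1ULL << TARGET_PAGE_BITS)
#define TARGET_PAGE_ALIGN(x) \
    (((x) + TARGET_PAGE_SIZE - 1) & ~(TARGET_PAGE_SIZE - 1))
#define ROUND_UP(n, d)    ((((n) + (d) - 1) / (d)) * (d))
#define BITS_PER_BYTE     8

int main(void)
{
    uint64_t size = 0x201000;  /* example: 2 MiB + 4 KiB region */

    /* Same two lines as the patch: pages covered, then the bitmap
     * rounded up to whole 64-bit words before dividing into bytes. */
    uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS;
    uint64_t bitmap_bytes =
        ROUND_UP(pages, sizeof(uint64_t) * BITS_PER_BYTE) / BITS_PER_BYTE;

    printf("%llu pages -> %llu byte bitmap\n",
           (unsigned long long)pages,
           (unsigned long long)bitmap_bytes);  /* 513 pages -> 72 bytes */
    return 0;
}
```

The fixed TARGET_PAGE_SIZE granularity is deliberate: as the patch's comments note, cpu_physical_memory_set_dirty_lebitmap() expects one bit per target page, so the container's pgsize_bitmap must include TARGET_PAGE_SIZE for dirty tracking to be enabled at all.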
diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 13471ae294..e18ea2cf91 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -29,6 +29,7 @@ #include "hw/vfio/vfio.h" #include "exec/address-spaces.h" #include "exec/memory.h" +#include "exec/ram_addr.h" #include "hw/hw.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -37,6 +38,7 @@ #include "sysemu/reset.h" #include "trace.h" #include "qapi/error.h" +#include "migration/migration.h" VFIOGroupList vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list); @@ -203,7 +205,7 @@ void vfio_region_write(void *opaque, hwaddr addr, buf.qword = cpu_to_le64(data); break; default: - hw_error("vfio: unsupported write size, %d bytes", size); + hw_error("vfio: unsupported write size, %u bytes", size); break; } @@ -260,7 +262,7 @@ uint64_t vfio_region_read(void *opaque, data = le64_to_cpu(buf.qword); break; default: - hw_error("vfio: unsupported read size, %d bytes", size); + hw_error("vfio: unsupported read size, %u bytes", size); break; } @@ -287,10 +289,146 @@ const MemoryRegionOps vfio_region_ops = { }; /* + * Device state interfaces + */ + +bool vfio_mig_active(void) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + + if (QLIST_EMPTY(&vfio_group_list)) { + return false; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (vbasedev->migration_blocker) { + return false; + } + } + } + return true; +} + +static bool vfio_devices_all_stopped_and_saving(VFIOContainer *container) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + MigrationState *ms = migrate_get_current(); + + if (!migration_is_setup_or_active(ms->state)) { + return false; + } + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + VFIOMigration *migration = vbasedev->migration; + + if (!migration) { + return false; + } + + if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && + !(migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + continue; + } else { + return false; + } + } + } + return true; +} + +static bool vfio_devices_all_running_and_saving(VFIOContainer *container) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + MigrationState *ms = migrate_get_current(); + + if (!migration_is_setup_or_active(ms->state)) { + return false; + } + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + VFIOMigration *migration = vbasedev->migration; + + if (!migration) { + return false; + } + + if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && + (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + continue; + } else { + return false; + } + } + } + return true; +} + +static int vfio_dma_unmap_bitmap(VFIOContainer *container, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + struct vfio_iommu_type1_dma_unmap *unmap; + struct vfio_bitmap *bitmap; + uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS; + int ret; + + unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); + + unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); + unmap->iova = iova; + unmap->size = size; + unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; + bitmap = (struct vfio_bitmap *)&unmap->data; + + /* + * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of + * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to + * TARGET_PAGE_SIZE. 
+ */ + + bitmap->pgsize = TARGET_PAGE_SIZE; + bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + + if (bitmap->size > container->max_dirty_bitmap_size) { + error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, + (uint64_t)bitmap->size); + ret = -E2BIG; + goto unmap_exit; + } + + bitmap->data = g_try_malloc0(bitmap->size); + if (!bitmap->data) { + ret = -ENOMEM; + goto unmap_exit; + } + + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); + if (!ret) { + cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data, + iotlb->translated_addr, pages); + } else { + error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); + } + + g_free(bitmap->data); +unmap_exit: + g_free(unmap); + return ret; +} + +/* * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 */ static int vfio_dma_unmap(VFIOContainer *container, - hwaddr iova, ram_addr_t size) + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) { struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap), @@ -299,6 +437,11 @@ static int vfio_dma_unmap(VFIOContainer *container, .size = size, }; + if (iotlb && container->dirty_pages_supported && + vfio_devices_all_running_and_saving(container)) { + return vfio_dma_unmap_bitmap(container, iova, size, iotlb); + } + while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { /* * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c @@ -346,7 +489,7 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova, * the VGA ROM space. */ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || - (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 && + (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 && ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { return 0; } @@ -407,8 +550,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) } /* Called with rcu_read_lock held. 
*/ -static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr, - bool *read_only) +static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, + ram_addr_t *ram_addr, bool *read_only) { MemoryRegion *mr; hwaddr xlat; @@ -439,8 +582,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr, return false; } - *vaddr = memory_region_get_ram_ptr(mr) + xlat; - *read_only = !writable || mr->readonly; + if (vaddr) { + *vaddr = memory_region_get_ram_ptr(mr) + xlat; + } + + if (ram_addr) { + *ram_addr = memory_region_get_ram_addr(mr) + xlat; + } + + if (read_only) { + *read_only = !writable || mr->readonly; + } return true; } @@ -450,7 +602,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); VFIOContainer *container = giommu->container; hwaddr iova = iotlb->iova + giommu->iommu_offset; - bool read_only; void *vaddr; int ret; @@ -466,7 +617,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) rcu_read_lock(); if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { - if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) { + bool read_only; + + if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) { goto out; } /* @@ -486,7 +639,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) iotlb->addr_mask + 1, vaddr, ret); } } else { - ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1); + ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%m)", @@ -789,7 +942,7 @@ static void vfio_listener_region_del(MemoryListener *listener, } if (try_unmap) { - ret = vfio_dma_unmap(container, iova, int128_get64(llsize)); + ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); if (ret) { error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%m)", @@ -812,9 +965,156 @@ static void vfio_listener_region_del(MemoryListener *listener, } } +static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, + uint64_t size, ram_addr_t ram_addr) +{ + struct vfio_iommu_type1_dirty_bitmap *dbitmap; + struct vfio_iommu_type1_dirty_bitmap_get *range; + uint64_t pages; + int ret; + + dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); + + dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); + dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; + range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; + range->iova = iova; + range->size = size; + + /* + * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of + * TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to + * TARGET_PAGE_SIZE. 
+ */ + range->bitmap.pgsize = TARGET_PAGE_SIZE; + + pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS; + range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + range->bitmap.data = g_try_malloc0(range->bitmap.size); + if (!range->bitmap.data) { + ret = -ENOMEM; + goto err_out; + } + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); + if (ret) { + error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 + " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, + (uint64_t)range->size, errno); + goto err_out; + } + + cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data, + ram_addr, pages); + + trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, + range->bitmap.size, ram_addr); +err_out: + g_free(range->bitmap.data); + g_free(dbitmap); + + return ret; +} + +typedef struct { + IOMMUNotifier n; + VFIOGuestIOMMU *giommu; +} vfio_giommu_dirty_notifier; + +static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + vfio_giommu_dirty_notifier *gdn = container_of(n, + vfio_giommu_dirty_notifier, n); + VFIOGuestIOMMU *giommu = gdn->giommu; + VFIOContainer *container = giommu->container; + hwaddr iova = iotlb->iova + giommu->iommu_offset; + ram_addr_t translated_addr; + + trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); + + if (iotlb->target_as != &address_space_memory) { + error_report("Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + return; + } + + rcu_read_lock(); + if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { + int ret; + + ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, + translated_addr); + if (ret) { + error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, + iotlb->addr_mask + 1, ret); + } + } + rcu_read_unlock(); +} + +static int vfio_sync_dirty_bitmap(VFIOContainer *container, + MemoryRegionSection *section) +{ + ram_addr_t ram_addr; + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + + QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + if (MEMORY_REGION(giommu->iommu) == section->mr && + giommu->n.start == section->offset_within_region) { + Int128 llend; + vfio_giommu_dirty_notifier gdn = { .giommu = giommu }; + int idx = memory_region_iommu_attrs_to_index(giommu->iommu, + MEMTXATTRS_UNSPECIFIED); + + llend = int128_add(int128_make64(section->offset_within_region), + section->size); + llend = int128_sub(llend, int128_one()); + + iommu_notifier_init(&gdn.n, + vfio_iommu_map_dirty_notify, + IOMMU_NOTIFIER_MAP, + section->offset_within_region, + int128_get64(llend), + idx); + memory_region_iommu_replay(giommu->iommu, &gdn.n); + break; + } + } + return 0; + } + + ram_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; + + return vfio_get_dirty_bitmap(container, + TARGET_PAGE_ALIGN(section->offset_within_address_space), + int128_get64(section->size), ram_addr); +} + +static void vfio_listerner_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + + if (vfio_listener_skipped_section(section) || + !container->dirty_pages_supported) { + return; + } + + if (vfio_devices_all_stopped_and_saving(container)) { + vfio_sync_dirty_bitmap(container, section); + } +} + static const MemoryListener vfio_memory_listener = { .region_add = 
vfio_listener_region_add, .region_del = vfio_listener_region_del, + .log_sync = vfio_listerner_log_sync, }; static void vfio_listener_release(VFIOContainer *container) @@ -825,17 +1125,12 @@ static void vfio_listener_release(VFIOContainer *container) } } -struct vfio_info_cap_header * -vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) +static struct vfio_info_cap_header * +vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id) { struct vfio_info_cap_header *hdr; - void *ptr = info; - if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { - return NULL; - } - - for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { + for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) { if (hdr->id == id) { return hdr; } @@ -844,6 +1139,57 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) return NULL; } +struct vfio_info_cap_header * +vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +static struct vfio_info_cap_header * +vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +struct vfio_info_cap_header * +vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, + unsigned int *avail) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_dma_avail *cap; + + /* If the capability cannot be found, assume no DMA limiting */ + hdr = vfio_get_iommu_type1_info_cap(info, + VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); + if (hdr == NULL) { + return false; + } + + if (avail != NULL) { + cap = (void *) hdr; + *avail = cap->avail; + } + + return true; +} + static int vfio_setup_region_sparse_mmaps(VFIORegion *region, struct vfio_region_info *info) { @@ -924,6 +1270,18 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, return 0; } +static void vfio_subregion_unmap(VFIORegion *region, int index) +{ + trace_vfio_region_unmap(memory_region_name(®ion->mmaps[index].mem), + region->mmaps[index].offset, + region->mmaps[index].offset + + region->mmaps[index].size - 1); + memory_region_del_subregion(region->mem, ®ion->mmaps[index].mem); + munmap(region->mmaps[index].mmap, region->mmaps[index].size); + object_unparent(OBJECT(®ion->mmaps[index].mem)); + region->mmaps[index].mmap = NULL; +} + int vfio_region_mmap(VFIORegion *region) { int i, prot = 0; @@ -954,10 +1312,7 @@ int vfio_region_mmap(VFIORegion *region) region->mmaps[i].mmap = NULL; for (i--; i >= 0; i--) { - memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); - munmap(region->mmaps[i].mmap, region->mmaps[i].size); - object_unparent(OBJECT(®ion->mmaps[i].mem)); - region->mmaps[i].mmap = NULL; + vfio_subregion_unmap(region, i); } return ret; @@ -982,6 +1337,21 @@ int vfio_region_mmap(VFIORegion *region) return 0; } +void vfio_region_unmap(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + vfio_subregion_unmap(region, i); + } + } +} + void vfio_region_exit(VFIORegion *region) { int i; @@ -1204,6 +1574,75 @@ static int 
vfio_init_container(VFIOContainer *container, int group_fd, return 0; } +static int vfio_get_iommu_info(VFIOContainer *container, + struct vfio_iommu_type1_info **info) +{ + + size_t argsz = sizeof(struct vfio_iommu_type1_info); + + *info = g_new0(struct vfio_iommu_type1_info, 1); +again: + (*info)->argsz = argsz; + + if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { + g_free(*info); + *info = NULL; + return -errno; + } + + if (((*info)->argsz > argsz)) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + goto again; + } + + return 0; +} + +static struct vfio_info_cap_header * +vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +{ + struct vfio_info_cap_header *hdr; + void *ptr = info; + + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; + } + + for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { + if (hdr->id == id) { + return hdr; + } + } + + return NULL; +} + +static void vfio_get_iommu_info_migration(VFIOContainer *container, + struct vfio_iommu_type1_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_cap_migration *cap_mig; + + hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); + if (!hdr) { + return; + } + + cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, + header); + + /* + * cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of + * TARGET_PAGE_SIZE to mark those dirty. + */ + if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) { + container->dirty_pages_supported = true; + container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; + container->dirty_pgsizes = cap_mig->pgsize_bitmap; + } +} + static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, Error **errp) { @@ -1273,6 +1712,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, container->space = space; container->fd = fd; container->error = NULL; + container->dirty_pages_supported = false; QLIST_INIT(&container->giommu_list); QLIST_INIT(&container->hostwin_list); @@ -1285,7 +1725,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, case VFIO_TYPE1v2_IOMMU: case VFIO_TYPE1_IOMMU: { - struct vfio_iommu_type1_info info; + struct vfio_iommu_type1_info *info; /* * FIXME: This assumes that a Type1 IOMMU can map any 64-bit @@ -1294,15 +1734,19 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, * existing Type1 IOMMUs generally support any IOVA we're * going to actually try in practice. 
*/ - info.argsz = sizeof(info); - ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info); - /* Ignore errors */ - if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) { + ret = vfio_get_iommu_info(container, &info); + + if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) { /* Assume 4k IOVA page size */ - info.iova_pgsizes = 4096; + info->iova_pgsizes = 4096; + } + vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes); + container->pgsizes = info->iova_pgsizes; + + if (!ret) { + vfio_get_iommu_info_migration(container, info); } - vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes); - container->pgsizes = info.iova_pgsizes; + g_free(info); break; } case VFIO_SPAPR_TCE_v2_IOMMU: diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build index 37efa74018..da9af297a0 100644 --- a/hw/vfio/meson.build +++ b/hw/vfio/meson.build @@ -2,6 +2,7 @@ vfio_ss = ss.source_set() vfio_ss.add(files( 'common.c', 'spapr.c', + 'migration.c', )) vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( 'display.c', diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c new file mode 100644 index 0000000000..3ce285ea39 --- /dev/null +++ b/hw/vfio/migration.c @@ -0,0 +1,933 @@ +/* + * Migration support for VFIO devices + * + * Copyright NVIDIA, Inc. 2020 + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" +#include <linux/vfio.h> +#include <sys/ioctl.h> + +#include "sysemu/runstate.h" +#include "hw/vfio/vfio-common.h" +#include "cpu.h" +#include "migration/migration.h" +#include "migration/vmstate.h" +#include "migration/qemu-file.h" +#include "migration/register.h" +#include "migration/blocker.h" +#include "migration/misc.h" +#include "qapi/error.h" +#include "exec/ramlist.h" +#include "exec/ram_addr.h" +#include "pci.h" +#include "trace.h" +#include "hw/hw.h" + +/* + * Flags to be used as unique delimiters for VFIO devices in the migration + * stream. These flags are composed as: + * 0xffffffff => MSB 32-bit all 1s + * 0xef10 => Magic ID, represents emulated (virtual) function IO + * 0x0000 => 16-bits reserved for flags + * + * The beginning of state information is marked by _DEV_CONFIG_STATE, + * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a + * certain state information is marked by _END_OF_STATE. + */ +#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL) +#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL) +#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL) +#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL) + +static int64_t bytes_transferred; + +static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count, + off_t off, bool iswrite) +{ + int ret; + + ret = iswrite ? pwrite(vbasedev->fd, val, count, off) : + pread(vbasedev->fd, val, count, off); + if (ret < count) { + error_report("vfio_mig_%s %d byte %s: failed at offset 0x%" + HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count, + vbasedev->name, off, strerror(errno)); + return (ret < 0) ? 
ret : -EINVAL; + } + return 0; +} + +static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count, + off_t off, bool iswrite) +{ + int ret, done = 0; + __u8 *tbuf = buf; + + while (count) { + int bytes = 0; + + if (count >= 8 && !(off % 8)) { + bytes = 8; + } else if (count >= 4 && !(off % 4)) { + bytes = 4; + } else if (count >= 2 && !(off % 2)) { + bytes = 2; + } else { + bytes = 1; + } + + ret = vfio_mig_access(vbasedev, tbuf, bytes, off, iswrite); + if (ret) { + return ret; + } + + count -= bytes; + done += bytes; + off += bytes; + tbuf += bytes; + } + return done; +} + +#define vfio_mig_read(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, false) +#define vfio_mig_write(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, true) + +#define VFIO_MIG_STRUCT_OFFSET(f) \ + offsetof(struct vfio_device_migration_info, f) +/* + * Change the device_state register for device @vbasedev. Bits set in @mask + * are preserved, bits set in @value are set, and bits not set in either @mask + * or @value are cleared in device_state. If the register cannot be accessed, + * the resulting state would be invalid, or the device enters an error state, + * an error is returned. + */ + +static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, + uint32_t value) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + off_t dev_state_off = region->fd_offset + + VFIO_MIG_STRUCT_OFFSET(device_state); + uint32_t device_state; + int ret; + + ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + if (ret < 0) { + return ret; + } + + device_state = (device_state & mask) | value; + + if (!VFIO_DEVICE_STATE_VALID(device_state)) { + return -EINVAL; + } + + ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + if (ret < 0) { + int rret; + + rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + + if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) { + hw_error("%s: Device in error state 0x%x", vbasedev->name, + device_state); + return rret ? rret : -EIO; + } + return ret; + } + + migration->device_state = device_state; + trace_vfio_migration_set_state(vbasedev->name, device_state); + return 0; +} + +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, + uint64_t data_size, uint64_t *size) +{ + void *ptr = NULL; + uint64_t limit = 0; + int i; + + if (!region->mmaps) { + if (size) { + *size = MIN(data_size, region->size - data_offset); + } + return ptr; + } + + for (i = 0; i < region->nr_mmaps; i++) { + VFIOMmap *map = region->mmaps + i; + + if ((data_offset >= map->offset) && + (data_offset < map->offset + map->size)) { + + /* check if data_offset is within sparse mmap areas */ + ptr = map->mmap + data_offset - map->offset; + if (size) { + *size = MIN(data_size, map->offset + map->size - data_offset); + } + break; + } else if ((data_offset < map->offset) && + (!limit || limit > map->offset)) { + /* + * data_offset is not within sparse mmap areas, find size of + * non-mapped area. Check through all list since region->mmaps list + * is not sorted. + */ + limit = map->offset; + } + } + + if (!ptr && size) { + *size = limit ? 
MIN(data_size, limit - data_offset) : data_size; + } + return ptr; +} + +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t data_offset = 0, data_size = 0, sz; + int ret; + + ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset)); + if (ret < 0) { + return ret; + } + + ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size)); + if (ret < 0) { + return ret; + } + + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, + migration->pending_bytes); + + qemu_put_be64(f, data_size); + sz = data_size; + + while (sz) { + void *buf; + uint64_t sec_size; + bool buf_allocated = false; + + buf = get_data_section_size(region, data_offset, sz, &sec_size); + + if (!buf) { + buf = g_try_malloc(sec_size); + if (!buf) { + error_report("%s: Error allocating buffer ", __func__); + return -ENOMEM; + } + buf_allocated = true; + + ret = vfio_mig_read(vbasedev, buf, sec_size, + region->fd_offset + data_offset); + if (ret < 0) { + g_free(buf); + return ret; + } + } + + qemu_put_buffer(f, buf, sec_size); + + if (buf_allocated) { + g_free(buf); + } + sz -= sec_size; + data_offset += sec_size; + } + + ret = qemu_file_get_error(f); + + if (!ret && size) { + *size = data_size; + } + + bytes_transferred += data_size; + return ret; +} + +static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev, + uint64_t data_size) +{ + VFIORegion *region = &vbasedev->migration->region; + uint64_t data_offset = 0, size, report_size; + int ret; + + do { + ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset)); + if (ret < 0) { + return ret; + } + + if (data_offset + data_size > region->size) { + /* + * If data_size is greater than the data section of migration region + * then iterate the write buffer operation. This case can occur if + * size of migration region at destination is smaller than size of + * migration region at source. 
+ */ + report_size = size = region->size - data_offset; + data_size -= size; + } else { + report_size = size = data_size; + data_size = 0; + } + + trace_vfio_load_state_device_data(vbasedev->name, data_offset, size); + + while (size) { + void *buf; + uint64_t sec_size; + bool buf_alloc = false; + + buf = get_data_section_size(region, data_offset, size, &sec_size); + + if (!buf) { + buf = g_try_malloc(sec_size); + if (!buf) { + error_report("%s: Error allocating buffer ", __func__); + return -ENOMEM; + } + buf_alloc = true; + } + + qemu_get_buffer(f, buf, sec_size); + + if (buf_alloc) { + ret = vfio_mig_write(vbasedev, buf, sec_size, + region->fd_offset + data_offset); + g_free(buf); + + if (ret < 0) { + return ret; + } + } + size -= sec_size; + data_offset += sec_size; + } + + ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size)); + if (ret < 0) { + return ret; + } + } while (data_size); + + return 0; +} + +static int vfio_update_pending(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t pending_bytes = 0; + int ret; + + ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(pending_bytes)); + if (ret < 0) { + migration->pending_bytes = 0; + return ret; + } + + migration->pending_bytes = pending_bytes; + trace_vfio_update_pending(vbasedev->name, pending_bytes); + return 0; +} + +static int vfio_save_device_config_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE); + + if (vbasedev->ops && vbasedev->ops->vfio_save_config) { + vbasedev->ops->vfio_save_config(vbasedev, f); + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + trace_vfio_save_device_config_state(vbasedev->name); + + return qemu_file_get_error(f); +} + +static int vfio_load_device_config_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + uint64_t data; + + if (vbasedev->ops && vbasedev->ops->vfio_load_config) { + int ret; + + ret = vbasedev->ops->vfio_load_config(vbasedev, f); + if (ret) { + error_report("%s: Failed to load device config space", + vbasedev->name); + return ret; + } + } + + data = qemu_get_be64(f); + if (data != VFIO_MIG_FLAG_END_OF_STATE) { + error_report("%s: Failed loading device config space, " + "end flag incorrect 0x%"PRIx64, vbasedev->name, data); + return -EINVAL; + } + + trace_vfio_load_device_config_state(vbasedev->name); + return qemu_file_get_error(f); +} + +static int vfio_set_dirty_page_tracking(VFIODevice *vbasedev, bool start) +{ + int ret; + VFIOMigration *migration = vbasedev->migration; + VFIOContainer *container = vbasedev->group->container; + struct vfio_iommu_type1_dirty_bitmap dirty = { + .argsz = sizeof(dirty), + }; + + if (start) { + if (migration->device_state & VFIO_DEVICE_STATE_SAVING) { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; + } else { + return -EINVAL; + } + } else { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; + } + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); + if (ret) { + error_report("Failed to set dirty tracking flag 0x%x errno: %d", + dirty.flags, errno); + return -errno; + } + return ret; +} + +static void vfio_migration_cleanup(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + vfio_set_dirty_page_tracking(vbasedev, false); + + if (migration->region.mmaps) { + vfio_region_unmap(&migration->region); + } +} + +/* 
---------------------------------------------------------------------- */ + +static int vfio_save_setup(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + trace_vfio_save_setup(vbasedev->name); + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE); + + if (migration->region.mmaps) { + /* + * Calling vfio_region_mmap() from migration thread. Memory API called + * from this function require locking the iothread when called from + * outside the main loop thread. + */ + qemu_mutex_lock_iothread(); + ret = vfio_region_mmap(&migration->region); + qemu_mutex_unlock_iothread(); + if (ret) { + error_report("%s: Failed to mmap VFIO migration region: %s", + vbasedev->name, strerror(-ret)); + error_report("%s: Falling back to slow path", vbasedev->name); + } + } + + ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK, + VFIO_DEVICE_STATE_SAVING); + if (ret) { + error_report("%s: Failed to set state SAVING", vbasedev->name); + return ret; + } + + ret = vfio_set_dirty_page_tracking(vbasedev, true); + if (ret) { + return ret; + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + return 0; +} + +static void vfio_save_cleanup(void *opaque) +{ + VFIODevice *vbasedev = opaque; + + vfio_migration_cleanup(vbasedev); + trace_vfio_save_cleanup(vbasedev->name); +} + +static void vfio_save_pending(QEMUFile *f, void *opaque, + uint64_t threshold_size, + uint64_t *res_precopy_only, + uint64_t *res_compatible, + uint64_t *res_postcopy_only) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + ret = vfio_update_pending(vbasedev); + if (ret) { + return; + } + + *res_precopy_only += migration->pending_bytes; + + trace_vfio_save_pending(vbasedev->name, *res_precopy_only, + *res_postcopy_only, *res_compatible); +} + +static int vfio_save_iterate(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint64_t data_size; + int ret; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + + if (migration->pending_bytes == 0) { + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + if (migration->pending_bytes == 0) { + qemu_put_be64(f, 0); + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + /* indicates data finished, goto complete phase */ + return 1; + } + } + + ret = vfio_save_buffer(f, vbasedev, &data_size); + if (ret) { + error_report("%s: vfio_save_buffer failed %s", vbasedev->name, + strerror(errno)); + return ret; + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + /* + * Reset pending_bytes as .save_live_pending is not called during savevm or + * snapshot case, in such case vfio_update_pending() at the start of this + * function updates pending_bytes. 
+ */ + migration->pending_bytes = 0; + trace_vfio_save_iterate(vbasedev->name, data_size); + return 0; +} + +static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint64_t data_size; + int ret; + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING, + VFIO_DEVICE_STATE_SAVING); + if (ret) { + error_report("%s: Failed to set state STOP and SAVING", + vbasedev->name); + return ret; + } + + ret = vfio_save_device_config_state(f, opaque); + if (ret) { + return ret; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + while (migration->pending_bytes > 0) { + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + ret = vfio_save_buffer(f, vbasedev, &data_size); + if (ret < 0) { + error_report("%s: Failed to save buffer", vbasedev->name); + return ret; + } + + if (data_size == 0) { + break; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0); + if (ret) { + error_report("%s: Failed to set state STOPPED", vbasedev->name); + return ret; + } + + trace_vfio_save_complete_precopy(vbasedev->name); + return ret; +} + +static int vfio_load_setup(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret = 0; + + if (migration->region.mmaps) { + ret = vfio_region_mmap(&migration->region); + if (ret) { + error_report("%s: Failed to mmap VFIO migration region %d: %s", + vbasedev->name, migration->region.nr, + strerror(-ret)); + error_report("%s: Falling back to slow path", vbasedev->name); + } + } + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK, + VFIO_DEVICE_STATE_RESUMING); + if (ret) { + error_report("%s: Failed to set state RESUMING", vbasedev->name); + if (migration->region.mmaps) { + vfio_region_unmap(&migration->region); + } + } + return ret; +} + +static int vfio_load_cleanup(void *opaque) +{ + VFIODevice *vbasedev = opaque; + + vfio_migration_cleanup(vbasedev); + trace_vfio_load_cleanup(vbasedev->name); + return 0; +} + +static int vfio_load_state(QEMUFile *f, void *opaque, int version_id) +{ + VFIODevice *vbasedev = opaque; + int ret = 0; + uint64_t data; + + data = qemu_get_be64(f); + while (data != VFIO_MIG_FLAG_END_OF_STATE) { + + trace_vfio_load_state(vbasedev->name, data); + + switch (data) { + case VFIO_MIG_FLAG_DEV_CONFIG_STATE: + { + ret = vfio_load_device_config_state(f, opaque); + if (ret) { + return ret; + } + break; + } + case VFIO_MIG_FLAG_DEV_SETUP_STATE: + { + data = qemu_get_be64(f); + if (data == VFIO_MIG_FLAG_END_OF_STATE) { + return ret; + } else { + error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64, + vbasedev->name, data); + return -EINVAL; + } + break; + } + case VFIO_MIG_FLAG_DEV_DATA_STATE: + { + uint64_t data_size = qemu_get_be64(f); + + if (data_size) { + ret = vfio_load_buffer(f, vbasedev, data_size); + if (ret < 0) { + return ret; + } + } + break; + } + default: + error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data); + return -EINVAL; + } + + data = qemu_get_be64(f); + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + } + return ret; +} + +static SaveVMHandlers savevm_vfio_handlers = { + .save_setup = vfio_save_setup, + .save_cleanup = vfio_save_cleanup, + .save_live_pending = vfio_save_pending, + 
.save_live_iterate = vfio_save_iterate, + .save_live_complete_precopy = vfio_save_complete_precopy, + .load_setup = vfio_load_setup, + .load_cleanup = vfio_load_cleanup, + .load_state = vfio_load_state, +}; + +/* ---------------------------------------------------------------------- */ + +static void vfio_vmstate_change(void *opaque, int running, RunState state) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint32_t value, mask; + int ret; + + if (vbasedev->migration->vm_running == running) { + return; + } + + if (running) { + /* + * Here device state can have one of _SAVING, _RESUMING or _STOP bit. + * Transition from _SAVING to _RUNNING can happen if there is migration + * failure, in that case clear _SAVING bit. + * Transition from _RESUMING to _RUNNING occurs during resuming + * phase, in that case clear _RESUMING bit. + * In both the above cases, set _RUNNING bit. + */ + mask = ~VFIO_DEVICE_STATE_MASK; + value = VFIO_DEVICE_STATE_RUNNING; + } else { + /* + * Here device state could be either _RUNNING or _SAVING|_RUNNING. Reset + * _RUNNING bit + */ + mask = ~VFIO_DEVICE_STATE_RUNNING; + value = 0; + } + + ret = vfio_migration_set_state(vbasedev, mask, value); + if (ret) { + /* + * Migration should be aborted in this case, but vm_state_notify() + * currently does not support reporting failures. + */ + error_report("%s: Failed to set device state 0x%x", vbasedev->name, + (migration->device_state & mask) | value); + qemu_file_set_error(migrate_get_current()->to_dst_file, ret); + } + vbasedev->migration->vm_running = running; + trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state), + (migration->device_state & mask) | value); +} + +static void vfio_migration_state_notifier(Notifier *notifier, void *data) +{ + MigrationState *s = data; + VFIOMigration *migration = container_of(notifier, VFIOMigration, + migration_state); + VFIODevice *vbasedev = migration->vbasedev; + int ret; + + trace_vfio_migration_state_notifier(vbasedev->name, + MigrationStatus_str(s->state)); + + switch (s->state) { + case MIGRATION_STATUS_CANCELLING: + case MIGRATION_STATUS_CANCELLED: + case MIGRATION_STATUS_FAILED: + bytes_transferred = 0; + ret = vfio_migration_set_state(vbasedev, + ~(VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RESUMING), + VFIO_DEVICE_STATE_RUNNING); + if (ret) { + error_report("%s: Failed to set state RUNNING", vbasedev->name); + } + } +} + +static void vfio_migration_exit(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + vfio_region_exit(&migration->region); + vfio_region_finalize(&migration->region); + g_free(vbasedev->migration); + vbasedev->migration = NULL; +} + +static int vfio_migration_init(VFIODevice *vbasedev, + struct vfio_region_info *info) +{ + int ret; + Object *obj; + VFIOMigration *migration; + char id[256] = ""; + g_autofree char *path = NULL, *oid = NULL; + + if (!vbasedev->ops->vfio_get_object) { + return -EINVAL; + } + + obj = vbasedev->ops->vfio_get_object(vbasedev); + if (!obj) { + return -EINVAL; + } + + vbasedev->migration = g_new0(VFIOMigration, 1); + + ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region, + info->index, "migration"); + if (ret) { + error_report("%s: Failed to setup VFIO migration region %d: %s", + vbasedev->name, info->index, strerror(-ret)); + goto err; + } + + if (!vbasedev->migration->region.size) { + error_report("%s: Invalid zero-sized VFIO migration region %d", + vbasedev->name, info->index); + ret = -EINVAL; + goto err; + } + + migration = 
vbasedev->migration; + migration->vbasedev = vbasedev; + + oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj))); + if (oid) { + path = g_strdup_printf("%s/vfio", oid); + } else { + path = g_strdup("vfio"); + } + strpadcpy(id, sizeof(id), path, '\0'); + + register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers, + vbasedev); + + migration->vm_state = qemu_add_vm_change_state_handler(vfio_vmstate_change, + vbasedev); + migration->migration_state.notify = vfio_migration_state_notifier; + add_migration_state_change_notifier(&migration->migration_state); + return 0; + +err: + vfio_migration_exit(vbasedev); + return ret; +} + +/* ---------------------------------------------------------------------- */ + +int64_t vfio_mig_bytes_transferred(void) +{ + return bytes_transferred; +} + +int vfio_migration_probe(VFIODevice *vbasedev, Error **errp) +{ + VFIOContainer *container = vbasedev->group->container; + struct vfio_region_info *info = NULL; + Error *local_err = NULL; + int ret = -ENOTSUP; + + if (!container->dirty_pages_supported) { + goto add_blocker; + } + + ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION, + VFIO_REGION_SUBTYPE_MIGRATION, &info); + if (ret) { + goto add_blocker; + } + + ret = vfio_migration_init(vbasedev, info); + if (ret) { + goto add_blocker; + } + + g_free(info); + trace_vfio_migration_probe(vbasedev->name, info->index); + return 0; + +add_blocker: + error_setg(&vbasedev->migration_blocker, + "VFIO device doesn't support migration"); + g_free(info); + + ret = migrate_add_blocker(vbasedev->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(vbasedev->migration_blocker); + vbasedev->migration_blocker = NULL; + } + return ret; +} + +void vfio_migration_finalize(VFIODevice *vbasedev) +{ + if (vbasedev->migration) { + VFIOMigration *migration = vbasedev->migration; + + remove_migration_state_change_notifier(&migration->migration_state); + qemu_del_vm_change_state_handler(migration->vm_state); + vfio_migration_exit(vbasedev); + } + + if (vbasedev->migration_blocker) { + migrate_del_blocker(vbasedev->migration_blocker); + error_free(vbasedev->migration_blocker); + vbasedev->migration_blocker = NULL; + } +} diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 0d83eb0e47..58c0ce8971 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -41,6 +41,7 @@ #include "trace.h" #include "qapi/error.h" #include "migration/blocker.h" +#include "migration/qemu-file.h" #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" @@ -2394,10 +2395,68 @@ static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev) } } +static Object *vfio_pci_get_object(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + + return OBJECT(vdev); +} + +static bool vfio_msix_present(void *opaque, int version_id) +{ + PCIDevice *pdev = opaque; + + return msix_present(pdev); +} + +const VMStateDescription vmstate_vfio_pci_config = { + .name = "VFIOPCIDevice", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), + VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present), + VMSTATE_END_OF_LIST() + } +}; + +static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + + vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL); +} + +static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, 
VFIOPCIDevice, vbasedev); + PCIDevice *pdev = &vdev->pdev; + int ret; + + ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); + if (ret) { + return ret; + } + + vfio_pci_write_config(pdev, PCI_COMMAND, + pci_get_word(pdev->config + PCI_COMMAND), 2); + + if (msi_enabled(pdev)) { + vfio_msi_enable(vdev); + } else if (msix_enabled(pdev)) { + vfio_msix_enable(vdev); + } + + return ret; +} + static VFIODeviceOps vfio_pci_ops = { .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, .vfio_eoi = vfio_intx_eoi, + .vfio_get_object = vfio_pci_get_object, + .vfio_save_config = vfio_pci_save_config, + .vfio_load_config = vfio_pci_load_config, }; int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) @@ -2732,17 +2791,6 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) return; } - if (!pdev->failover_pair_id) { - error_setg(&vdev->migration_blocker, - "VFIO device doesn't support migration"); - ret = migrate_add_blocker(vdev->migration_blocker, errp); - if (ret) { - error_free(vdev->migration_blocker); - vdev->migration_blocker = NULL; - return; - } - } - vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev); vdev->vbasedev.ops = &vfio_pci_ops; vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI; @@ -3010,6 +3058,13 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } } + if (!pdev->failover_pair_id) { + ret = vfio_migration_probe(&vdev->vbasedev, errp); + if (ret) { + error_report("%s: Migration disabled", vdev->vbasedev.name); + } + } + vfio_register_err_notifier(vdev); vfio_register_req_notifier(vdev); vfio_setup_resetfn_quirk(vdev); @@ -3024,11 +3079,6 @@ out_teardown: vfio_bars_exit(vdev); error: error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); - if (vdev->migration_blocker) { - migrate_del_blocker(vdev->migration_blocker); - error_free(vdev->migration_blocker); - vdev->migration_blocker = NULL; - } } static void vfio_instance_finalize(Object *obj) @@ -3040,10 +3090,6 @@ static void vfio_instance_finalize(Object *obj) vfio_bars_finalize(vdev); g_free(vdev->emulated_config_bits); g_free(vdev->rom); - if (vdev->migration_blocker) { - migrate_del_blocker(vdev->migration_blocker); - error_free(vdev->migration_blocker); - } /* * XXX Leaking igd_opregion is not an oversight, we can't remove the * fw_cfg entry therefore leaking this allocation seems like the safest @@ -3071,6 +3117,7 @@ static void vfio_exitfn(PCIDevice *pdev) } vfio_teardown_msi(vdev); vfio_bars_exit(vdev); + vfio_migration_finalize(&vdev->vbasedev); } static void vfio_pci_reset(DeviceState *dev) diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index bce71a9ac9..1574ef983f 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -172,7 +172,6 @@ struct VFIOPCIDevice { bool no_vfio_ioeventfd; bool enable_ramfb; VFIODisplay *dpy; - Error *migration_blocker; Notifier irqchip_change_notifier; }; diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index 869ed2c39d..cc3f66f7e4 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -166,7 +166,7 @@ static void vfio_intp_mmap_enable(void *opaque) VFIOINTp *tmp; VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque; - qemu_mutex_lock(&vdev->intp_mutex); + QEMU_LOCK_GUARD(&vdev->intp_mutex); QLIST_FOREACH(tmp, &vdev->intp_list, next) { if (tmp->state == VFIO_IRQ_ACTIVE) { trace_vfio_platform_intp_mmap_enable(tmp->pin); @@ -174,12 +174,10 @@ static void vfio_intp_mmap_enable(void *opaque) timer_mod(vdev->mmap_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->mmap_timeout); - 
qemu_mutex_unlock(&vdev->intp_mutex); return; } } vfio_mmap_set_enabled(vdev, true); - qemu_mutex_unlock(&vdev->intp_mutex); } /** @@ -289,7 +287,7 @@ static void vfio_platform_eoi(VFIODevice *vbasedev) VFIOPlatformDevice *vdev = container_of(vbasedev, VFIOPlatformDevice, vbasedev); - qemu_mutex_lock(&vdev->intp_mutex); + QEMU_LOCK_GUARD(&vdev->intp_mutex); QLIST_FOREACH(intp, &vdev->intp_list, next) { if (intp->state == VFIO_IRQ_ACTIVE) { trace_vfio_platform_eoi(intp->pin, @@ -314,7 +312,6 @@ static void vfio_platform_eoi(VFIODevice *vbasedev) vfio_intp_inject_pending_lockheld(intp); QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext); } - qemu_mutex_unlock(&vdev->intp_mutex); } /** diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 93a0bc2522..c0e75f24b7 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -113,6 +113,7 @@ vfio_region_mmap(const char *name, unsigned long offset, unsigned long end) "Reg vfio_region_exit(const char *name, int index) "Device %s, region %d" vfio_region_finalize(const char *name, int index) "Device %s, region %d" vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps enabled: %d" +vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]" vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8" @@ -144,3 +145,23 @@ vfio_display_edid_link_up(void) "" vfio_display_edid_link_down(void) "" vfio_display_edid_update(uint32_t prefx, uint32_t prefy) "%ux%u" vfio_display_edid_write_error(void) "" + +# migration.c +vfio_migration_probe(const char *name, uint32_t index) " (%s) Region %d" +vfio_migration_set_state(const char *name, uint32_t state) " (%s) state %d" +vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t dev_state) " (%s) running %d reason %s device state %d" +vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s" +vfio_save_setup(const char *name) " (%s)" +vfio_save_cleanup(const char *name) " (%s)" +vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64 +vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64 +vfio_save_device_config_state(const char *name) " (%s)" +vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64 +vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d" +vfio_save_complete_precopy(const char *name) " (%s)" +vfio_load_device_config_state(const char *name) " (%s)" +vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64 +vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64 +vfio_load_cleanup(const char *name) " (%s)" +vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64 +vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 |
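
As a reader aid for vfio_migration_set_state() above: the register update rule is new_state = (old_state & mask) | value, so each caller passes a preserve-mask plus the bits to set, and anything outside both is cleared. A minimal sketch of the two save-path transitions; the state bit values follow linux-headers <linux/vfio.h> of this era (an assumption here, since the patch does not reproduce them):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed from <linux/vfio.h> (5.10): */
#define VFIO_DEVICE_STATE_RUNNING  (1u << 0)
#define VFIO_DEVICE_STATE_SAVING   (1u << 1)
#define VFIO_DEVICE_STATE_RESUMING (1u << 2)
#define VFIO_DEVICE_STATE_MASK     (VFIO_DEVICE_STATE_RUNNING | \
                                    VFIO_DEVICE_STATE_SAVING | \
                                    VFIO_DEVICE_STATE_RESUMING)

/* The read-modify-write rule applied to the device_state register. */
static uint32_t next_state(uint32_t old, uint32_t mask, uint32_t value)
{
    return (old & mask) | value;
}

int main(void)
{
    uint32_t s = VFIO_DEVICE_STATE_RUNNING;

    /* vfio_save_setup(): preserve everything, add _SAVING (pre-copy,
     * device keeps running while dirty pages are tracked). */
    s = next_state(s, VFIO_DEVICE_STATE_MASK, VFIO_DEVICE_STATE_SAVING);
    printf("pre-copy:  0x%x\n", s);   /* RUNNING | SAVING = 0x3 */

    /* vfio_save_complete_precopy(): clear _RUNNING, keep _SAVING
     * (stop-and-copy phase). */
    s = next_state(s, ~VFIO_DEVICE_STATE_RUNNING, VFIO_DEVICE_STATE_SAVING);
    printf("stop-copy: 0x%x\n", s);   /* SAVING = 0x2 */
    return 0;
}
```

This is also why vfio_devices_all_stopped_and_saving() and vfio_devices_all_running_and_saving() in common.c test exactly these bit combinations: log_sync only pulls the full dirty bitmap once every device has reached SAVING-without-RUNNING, while unmap-time bitmaps are gathered while devices are still SAVING|RUNNING.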