Diffstat (limited to 'hw')
 hw/arm/virt-acpi-build.c        |    5
 hw/block/meson.build            |    4
 hw/block/vhost-user-blk.c       |   39
 hw/block/virtio-blk-common.c    |   39
 hw/block/virtio-blk.c           |   28
 hw/i386/acpi-build.c            |  302
 hw/i386/intel_iommu.c           |    5
 hw/mem/cxl_type3.c              |   14
 hw/net/virtio-net.c             |    9
 hw/scsi/vhost-scsi.c            |    4
 hw/scsi/vhost-user-scsi.c       |    2
 hw/smbios/smbios.c              |   63
 hw/virtio/Kconfig               |    5
 hw/virtio/meson.build           |    4
 hw/virtio/trace-events          |    9
 hw/virtio/vhost-user-fs.c       |    9
 hw/virtio/vhost-user-gpio-pci.c |   69
 hw/virtio/vhost-user-gpio.c     |  411
 hw/virtio/vhost-user-i2c.c      |   10
 hw/virtio/vhost-user-rng.c      |   10
 hw/virtio/vhost-user-vsock.c    |    8
 hw/virtio/vhost-user.c          |   16
 hw/virtio/vhost-vsock-common.c  |    3
 hw/virtio/vhost-vsock.c         |    8
 hw/virtio/vhost.c               |    6
 hw/virtio/virtio-stub.c         |   42
 hw/virtio/virtio.c              | 1049
27 files changed, 1940 insertions(+), 233 deletions(-)
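
Several of the block and net hunks below switch from per-device config-size logic to the shared VirtIOConfigSizeParams / virtio_get_config_size() helper. For orientation, here is a minimal sketch of that pattern (not a verbatim excerpt from the series; the struct and macro names come from the diff, the realize() context around it is assumed):

/*
 * Sketch only: computing a virtio device's config space size from its
 * offered host features, using the helper adopted in the hunks below.
 */
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_blk.h"
#include "hw/virtio/virtio.h"

/* optional config fields only exist when the matching feature is offered */
static const VirtIOFeature example_feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_BLK_F_DISCARD,
     .end = endof(struct virtio_blk_config, discard_sector_alignment)},
    {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
     .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)},
    {}
};

static const VirtIOConfigSizeParams example_cfg_size_params = {
    .min_size = offsetof(struct virtio_blk_config, max_discard_sectors),
    .max_size = sizeof(struct virtio_blk_config),
    .feature_sizes = example_feature_sizes
};

/* in the device's realize(), once host_features is known */
static void example_init_config(VirtIODevice *vdev, uint64_t host_features)
{
    size_t config_size = virtio_get_config_size(&example_cfg_size_params,
                                                host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, config_size);
}

The same params structure is then reused wherever the device copies its config space (get/set config callbacks), so the visible size always matches the negotiated feature set, which is why vhost-user-blk below replaces the sizeof(struct virtio_blk_config) copies with vdev->config_len.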
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 9b3aee01bf..13c6e3e468 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -592,8 +592,7 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) acpi_table_begin(&table, table_data); /* CntControlBase Physical Address */ - /* FIXME: invalid value, should be 0xFFFFFFFFFFFFFFFF if not impl. ? */ - build_append_int_noprefix(table_data, 0, 8); + build_append_int_noprefix(table_data, 0xFFFFFFFFFFFFFFFF, 8); build_append_int_noprefix(table_data, 0, 4); /* Reserved */ /* * FIXME: clarify comment: @@ -618,7 +617,7 @@ build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) /* Non-Secure EL2 timer Flags */ build_append_int_noprefix(table_data, irqflags, 4); /* CntReadBase Physical address */ - build_append_int_noprefix(table_data, 0, 8); + build_append_int_noprefix(table_data, 0xFFFFFFFFFFFFFFFF, 8); /* Platform Timer Count */ build_append_int_noprefix(table_data, 0, 4); /* Platform Timer Offset */ diff --git a/hw/block/meson.build b/hw/block/meson.build index 2389326112..1908abd45c 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -16,7 +16,7 @@ softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) -specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) -specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c')) +specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c')) +specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c')) subdir('dataplane') diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 9117222456..84902dde17 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -23,6 +23,7 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "hw/virtio/virtio-blk-common.h" #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user-blk.h" #include "hw/virtio/virtio.h" @@ -63,7 +64,7 @@ static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config) /* Our num_queues overrides the device backend */ virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues); - memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config)); + memcpy(config, &s->blkcfg, vdev->config_len); } static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) @@ -92,12 +93,12 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) { int ret; struct virtio_blk_config blkcfg; + VirtIODevice *vdev = dev->vdev; VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, - sizeof(struct virtio_blk_config), - &local_err); + vdev->config_len, &local_err); if (ret < 0) { error_report_err(local_err); return ret; @@ -106,7 +107,7 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) /* valid for resize only */ if (blkcfg.capacity != s->blkcfg.capacity) { s->blkcfg.capacity = blkcfg.capacity; - memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config)); + memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); virtio_notify_config(dev->vdev); } @@ -229,7 +230,7 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) return; } - if (s->dev.started == should_start) { + if 
(vhost_dev_is_started(&s->dev) == should_start) { return; } @@ -259,12 +260,7 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev, virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH); virtio_add_feature(&features, VIRTIO_BLK_F_RO); - virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD); - virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES); - if (s->config_wce) { - virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE); - } if (s->num_queues > 1) { virtio_add_feature(&features, VIRTIO_BLK_F_MQ); } @@ -286,7 +282,7 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) return; } - if (s->dev.started) { + if (vhost_dev_is_started(&s->dev)) { return; } @@ -415,6 +411,12 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) * the vhost migration code. If disconnect was caught there is an * option for the general vhost code to get the dev state without * knowing its type (in this case vhost-user). + * + * FIXME: this is sketchy to be reaching into vhost_dev + * now because we are forcing something that implies we + * have executed vhost_dev_stop() but that won't happen + * until vhost_user_blk_stop() gets called from the bh. + * Really this state check should be tracked locally. */ s->dev.started = false; } @@ -447,7 +449,7 @@ static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp) assert(s->connected); ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, - sizeof(struct virtio_blk_config), errp); + s->parent_obj.config_len, errp); if (ret < 0) { qemu_chr_fe_disconnect(&s->chardev); vhost_dev_cleanup(&s->dev); @@ -462,6 +464,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) ERRP_GUARD(); VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserBlk *s = VHOST_USER_BLK(vdev); + size_t config_size; int retries; int i, ret; @@ -492,8 +495,9 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) return; } - virtio_init(vdev, VIRTIO_ID_BLOCK, - sizeof(struct virtio_blk_config)); + config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, + vdev->host_features); + virtio_init(vdev, VIRTIO_ID_BLOCK, config_size); s->virtqs = g_new(VirtQueue *, s->num_queues); for (i = 0; i < s->num_queues; i++) { @@ -591,7 +595,12 @@ static Property vhost_user_blk_properties[] = { DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues, VHOST_USER_BLK_AUTO_NUM_QUEUES), DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128), - DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true), + DEFINE_PROP_BIT64("config-wce", VHostUserBlk, parent_obj.host_features, + VIRTIO_BLK_F_CONFIG_WCE, true), + DEFINE_PROP_BIT64("discard", VHostUserBlk, parent_obj.host_features, + VIRTIO_BLK_F_DISCARD, true), + DEFINE_PROP_BIT64("write-zeroes", VHostUserBlk, parent_obj.host_features, + VIRTIO_BLK_F_WRITE_ZEROES, true), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c new file mode 100644 index 0000000000..ac52d7c176 --- /dev/null +++ b/hw/block/virtio-blk-common.c @@ -0,0 +1,39 @@ +/* + * Virtio Block Device common helpers + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "standard-headers/linux/virtio_blk.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-blk-common.h" + +/* Config size before the discard support (hide associated config fields) */ +#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ + max_discard_sectors) + +/* + * Starting from the discard feature, we can use this array to properly + * set the config size depending on the features enabled. + */ +static const VirtIOFeature feature_sizes[] = { + {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, + .end = endof(struct virtio_blk_config, discard_sector_alignment)}, + {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, + .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + {} +}; + +const VirtIOConfigSizeParams virtio_blk_cfg_size_params = { + .min_size = VIRTIO_BLK_CFG_SIZE, + .max_size = sizeof(struct virtio_blk_config), + .feature_sizes = feature_sizes +}; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index e9ba752f6b..8131ec2dbc 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -32,31 +32,9 @@ #include "hw/virtio/virtio-bus.h" #include "migration/qemu-file-types.h" #include "hw/virtio/virtio-access.h" +#include "hw/virtio/virtio-blk-common.h" #include "qemu/coroutine.h" -/* Config size before the discard support (hide associated config fields) */ -#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ - max_discard_sectors) -/* - * Starting from the discard feature, we can use this array to properly - * set the config size depending on the features enabled. - */ -static const VirtIOFeature feature_sizes[] = { - {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, - .end = endof(struct virtio_blk_config, discard_sector_alignment)}, - {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, - .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, - {} -}; - -static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features) -{ - s->config_size = MAX(VIRTIO_BLK_CFG_SIZE, - virtio_feature_get_config_size(feature_sizes, host_features)); - - assert(s->config_size <= sizeof(struct virtio_blk_config)); -} - static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, VirtIOBlockReq *req) { @@ -1204,8 +1182,8 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } - virtio_blk_set_config_size(s, s->host_features); - + s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, + s->host_features); virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); s->blk = conf->conf.blk; diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 0355bd3dda..4f54b61904 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -374,6 +374,25 @@ build_facs(GArray *table_data) g_array_append_vals(table_data, reserved, 40); /* Reserved */ } +Aml *aml_pci_device_dsm(void) +{ + Aml *method; + + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + Aml *params = aml_local(0); + Aml *pkg = aml_package(2); + aml_append(pkg, aml_name("BSEL")); + aml_append(pkg, aml_name("ASUN")); + aml_append(method, aml_store(pkg, params)); + aml_append(method, + aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), params)) + ); + } + return method; +} + static void build_append_pcihp_notify_entry(Aml *method, int slot) { Aml *if_ctx; @@ -408,13 +427,41 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, int func = PCI_FUNC(devfn); /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ int adr = slot << 16 | func; - bool 
hotplug_enabled_dev; - bool bridge_in_acpi; - bool cold_plugged_bridge; + bool hotpluggbale_slot = false; + bool bridge_in_acpi = false; + bool cold_plugged_bridge = false; + bool is_vga = false; - if (!pdev) { + if (pdev) { + pc = PCI_DEVICE_GET_CLASS(pdev); + dc = DEVICE_GET_CLASS(pdev); + + if (pc->class_id == PCI_CLASS_BRIDGE_ISA) { + continue; + } + + is_vga = pc->class_id == PCI_CLASS_DISPLAY_VGA; + + /* + * Cold plugged bridges aren't themselves hot-pluggable. + * Hotplugged bridges *are* hot-pluggable. + */ + cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged; + bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; + + hotpluggbale_slot = bsel && dc->hotpluggable && + !cold_plugged_bridge; + + /* + * allow describing coldplugged bridges in ACPI even if they are not + * on function 0, as they are not unpluggable, for all other devices + * generate description only for function 0 per slot + */ + if (func && !bridge_in_acpi) { + continue; + } + } else { /* - * add hotplug slots for non present devices. * hotplug is supported only for non-multifunction device * so generate device description only for function 0 */ @@ -422,51 +469,11 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, if (pci_bus_is_express(bus) && slot > 0) { break; } - dev = aml_device("S%.02X", devfn); - aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); - aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); - method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); - aml_append(method, - aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) - ); - aml_append(dev, method); - method = aml_method("_DSM", 4, AML_SERIALIZED); - aml_append(method, - aml_return(aml_call6("PDSM", aml_arg(0), aml_arg(1), - aml_arg(2), aml_arg(3), - aml_name("BSEL"), aml_name("_SUN"))) - ); - aml_append(dev, method); - aml_append(parent_scope, dev); - - build_append_pcihp_notify_entry(notify_method, slot); + /* mark it as empty hotpluggable slot */ + hotpluggbale_slot = true; + } else { + continue; } - continue; - } - - pc = PCI_DEVICE_GET_CLASS(pdev); - dc = DEVICE_GET_CLASS(pdev); - - /* - * Cold plugged bridges aren't themselves hot-pluggable. - * Hotplugged bridges *are* hot-pluggable. 
- */ - cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged; - bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; - - hotplug_enabled_dev = bsel && dc->hotpluggable && !cold_plugged_bridge; - - if (pc->class_id == PCI_CLASS_BRIDGE_ISA) { - continue; - } - - /* - * allow describing coldplugged bridges in ACPI even if they are not - * on function 0, as they are not unpluggable, for all other devices - * generate description only for function 0 per slot - */ - if (func && !bridge_in_acpi) { - continue; } /* start to compose PCI device descriptor */ @@ -479,15 +486,10 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, * enumeration order in linux kernel, so use another variable for it */ aml_append(dev, aml_name_decl("ASUN", aml_int(slot))); - method = aml_method("_DSM", 4, AML_SERIALIZED); - aml_append(method, aml_return( - aml_call6("PDSM", aml_arg(0), aml_arg(1), aml_arg(2), - aml_arg(3), aml_name("BSEL"), aml_name("ASUN")) - )); - aml_append(dev, method); + aml_append(dev, aml_pci_device_dsm()); } - if (pc->class_id == PCI_CLASS_DISPLAY_VGA) { + if (is_vga) { /* add VGA specific AML methods */ int s3d; @@ -508,19 +510,10 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, method = aml_method("_S3D", 0, AML_NOTSERIALIZED); aml_append(method, aml_return(aml_int(s3d))); aml_append(dev, method); - } else if (hotplug_enabled_dev) { - aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); - /* add _EJ0 to make slot hotpluggable */ - method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); - aml_append(method, - aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) - ); - aml_append(dev, method); + } - if (bsel) { - build_append_pcihp_notify_entry(notify_method, slot); - } - } else if (bridge_in_acpi) { + bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; + if (bridge_in_acpi) { /* * device is coldplugged bridge, * add child device descriptions into its scope @@ -529,6 +522,19 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, build_append_pci_bus_devices(dev, sec_bus, pcihp_bridge_en); } + + if (hotpluggbale_slot) { + aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); + /* add _EJ0 to make slot hotpluggable */ + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, + aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) + ); + aml_append(dev, method); + + build_append_pcihp_notify_entry(notify_method, slot); + } + /* device descriptor has been composed, add it into parent context */ aml_append(parent_scope, dev); } @@ -572,84 +578,100 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, qobject_unref(bsel); } -Aml *aml_pci_device_dsm(void) +static Aml *aml_pci_pdsm(void) { - Aml *method, *UUID, *ifctx, *ifctx1, *ifctx2, *ifctx3, *elsectx; - Aml *acpi_index = aml_local(0); + Aml *method, *UUID, *ifctx, *ifctx1; + Aml *ret = aml_local(0); + Aml *caps = aml_local(1); + Aml *acpi_index = aml_local(2); Aml *zero = aml_int(0); - Aml *bnum = aml_arg(4); + Aml *one = aml_int(1); Aml *func = aml_arg(2); Aml *rev = aml_arg(1); - Aml *sunum = aml_arg(5); + Aml *params = aml_arg(4); + Aml *bnum = aml_derefof(aml_index(params, aml_int(0))); + Aml *sunum = aml_derefof(aml_index(params, aml_int(1))); + + method = aml_method("PDSM", 5, AML_SERIALIZED); - method = aml_method("PDSM", 6, AML_SERIALIZED); + /* get supported functions */ + ifctx = aml_if(aml_equal(func, zero)); + { + uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ + aml_append(ifctx, 
aml_store(aml_buffer(1, byte_list), ret)); + aml_append(ifctx, aml_store(zero, caps)); + + /* + * PCI Firmware Specification 3.1 + * 4.6. _DSM Definitions for PCI + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); + { + /* call is for unsupported UUID, bail out */ + aml_append(ifctx1, aml_return(ret)); + } + aml_append(ifctx, ifctx1); + ifctx1 = aml_if(aml_lless(rev, aml_int(2))); + { + /* call is for unsupported REV, bail out */ + aml_append(ifctx1, aml_return(ret)); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, + aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + /* + * advertise function 7 if device has acpi-index + * acpi_index values: + * 0: not present (default value) + * FFFFFFFF: not supported (old QEMU without PIDX reg) + * other: device's acpi-index + */ + ifctx1 = aml_if(aml_lnot( + aml_or(aml_equal(acpi_index, zero), + aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) + )); + { + /* have supported functions */ + aml_append(ifctx1, aml_or(caps, one, caps)); + /* support for function 7 */ + aml_append(ifctx1, + aml_or(caps, aml_shiftleft(one, aml_int(7)), caps)); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, aml_store(caps, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + /* handle specific functions requests */ /* * PCI Firmware Specification 3.1 - * 4.6. _DSM Definitions for PCI + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems */ - UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); - ifctx = aml_if(aml_equal(aml_arg(0), UUID)); + ifctx = aml_if(aml_equal(func, aml_int(7))); { - aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); - ifctx1 = aml_if(aml_equal(func, zero)); - { - uint8_t byte_list[1]; + Aml *pkg = aml_package(2); - ifctx2 = aml_if(aml_equal(rev, aml_int(2))); - { - /* - * advertise function 7 if device has acpi-index - * acpi_index values: - * 0: not present (default value) - * FFFFFFFF: not supported (old QEMU without PIDX reg) - * other: device's acpi-index - */ - ifctx3 = aml_if(aml_lnot( - aml_or(aml_equal(acpi_index, zero), - aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) - )); - { - byte_list[0] = - 1 /* have supported functions */ | - 1 << 7 /* support for function 7 */ - ; - aml_append(ifctx3, aml_return(aml_buffer(1, byte_list))); - } - aml_append(ifctx2, ifctx3); - } - aml_append(ifctx1, ifctx2); - - byte_list[0] = 0; /* nothing supported */ - aml_append(ifctx1, aml_return(aml_buffer(1, byte_list))); - } - aml_append(ifctx, ifctx1); - elsectx = aml_else(); - /* - * PCI Firmware Specification 3.1 - * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under - * Operating Systems - */ - ifctx1 = aml_if(aml_equal(func, aml_int(7))); - { - Aml *pkg = aml_package(2); - Aml *ret = aml_local(1); - - aml_append(pkg, zero); - /* - * optional, if not impl. should return null string - */ - aml_append(pkg, aml_string("%s", "")); - aml_append(ifctx1, aml_store(pkg, ret)); - /* - * update acpi-index to actual value - */ - aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero))); - aml_append(ifctx1, aml_return(ret)); - } - aml_append(elsectx, ifctx1); - aml_append(ifctx, elsectx); + aml_append(pkg, zero); + /* + * optional, if not impl. 
should return null string + */ + aml_append(pkg, aml_string("%s", "")); + aml_append(ifctx, aml_store(pkg, ret)); + + aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + /* + * update acpi-index to actual value + */ + aml_append(ifctx, aml_store(acpi_index, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); } + aml_append(method, ifctx); return method; } @@ -1339,7 +1361,7 @@ static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr) aml_append(method, aml_return(aml_local(0))); aml_append(scope, method); - aml_append(scope, aml_pci_device_dsm()); + aml_append(scope, aml_pci_pdsm()); aml_append(table, scope); } @@ -1467,9 +1489,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); - if (misc->has_hpet) { - build_hpet_aml(dsdt); - } build_piix4_isa_bridge(dsdt); if (pm->pcihp_bridge_en || pm->pcihp_root_en) { build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); @@ -1515,9 +1534,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dsdt, sb_scope); - if (misc->has_hpet) { - build_hpet_aml(dsdt); - } build_q35_isa_bridge(dsdt); if (pm->pcihp_bridge_en) { build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); @@ -1528,6 +1544,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } } + if (misc->has_hpet) { + build_hpet_aml(dsdt); + } + if (vmbus_bridge) { sb_scope = aml_scope("_SB"); aml_append(sb_scope, build_vmbus_device_aml(vmbus_bridge)); diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 05d53a1aa9..6524c2ee32 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -3818,6 +3818,11 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); return false; } + if (!kvm_enable_x2apic()) { + error_setg(errp, "eim=on requires support on the KVM side" + "(X2APIC_API, first shipped in v4.7)"); + return false; + } } /* Currently only address widths supported are 39 and 48 bits */ diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index ada2108fac..a71bf1afeb 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -14,6 +14,12 @@ #include "sysemu/hostmem.h" #include "hw/cxl/cxl.h" +/* + * Null value of all Fs suggested by IEEE RA guidelines for use of + * EU, OUI and CID + */ +#define UI64_NULL ~(0ULL) + static void build_dvsecs(CXLType3Dev *ct3d) { CXLComponentState *cxl_cstate = &ct3d->cxl_cstate; @@ -149,7 +155,12 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL); pcie_endpoint_cap_init(pci_dev, 0x80); - cxl_cstate->dvsec_offset = 0x100; + if (ct3d->sn != UI64_NULL) { + pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn); + cxl_cstate->dvsec_offset = 0x100 + 0x0c; + } else { + cxl_cstate->dvsec_offset = 0x100; + } ct3d->cxl_cstate.pdev = pci_dev; build_dvsecs(ct3d); @@ -275,6 +286,7 @@ static Property ct3_props[] = { HostMemoryBackend *), DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 63a8332cd0..e9f696b4cf 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -106,6 +106,12 @@ static const VirtIOFeature feature_sizes[] = { {} }; +static const VirtIOConfigSizeParams cfg_size_params = { + .min_size = endof(struct virtio_net_config, mac), + .max_size = sizeof(struct virtio_net_config), + .feature_sizes = feature_sizes +}; + 
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) { VirtIONet *n = qemu_get_nic_opaque(nc); @@ -3241,8 +3247,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) { virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); - n->config_size = virtio_feature_get_config_size(feature_sizes, - host_features); + n->config_size = virtio_get_config_size(&cfg_size_params, host_features); } void virtio_net_set_netclient_name(VirtIONet *n, const char *name, diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 3059068175..bdf337a7a2 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -120,7 +120,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) start = false; } - if (vsc->dev.started == start) { + if (vhost_dev_is_started(&vsc->dev) == start) { return; } @@ -147,7 +147,7 @@ static int vhost_scsi_pre_save(void *opaque) /* At this point, backend must be stopped, otherwise * it might keep writing to memory. */ - assert(!vsc->dev.started); + assert(!vhost_dev_is_started(&vsc->dev)); return 0; } diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 1b2f7eed98..bc37317d55 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -49,7 +49,7 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running; - if (vsc->dev.started == start) { + if (vhost_dev_is_started(&vsc->dev) == start) { return; } diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 4c9f664830..51437ca09f 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -111,6 +111,13 @@ static struct { .processor_id = 0, }; +struct type8_instance { + const char *internal_reference, *external_reference; + uint8_t connector_type, port_type; + QTAILQ_ENTRY(type8_instance) next; +}; +static QTAILQ_HEAD(, type8_instance) type8 = QTAILQ_HEAD_INITIALIZER(type8); + static struct { size_t nvalues; char **values; @@ -337,6 +344,29 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = { { /* end of list */ } }; +static const QemuOptDesc qemu_smbios_type8_opts[] = { + { + .name = "internal_reference", + .type = QEMU_OPT_STRING, + .help = "internal reference designator", + }, + { + .name = "external_reference", + .type = QEMU_OPT_STRING, + .help = "external reference designator", + }, + { + .name = "connector_type", + .type = QEMU_OPT_NUMBER, + .help = "connector type", + }, + { + .name = "port_type", + .type = QEMU_OPT_NUMBER, + .help = "port type", + }, +}; + static const QemuOptDesc qemu_smbios_type11_opts[] = { { .name = "value", @@ -718,6 +748,26 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) smbios_type4_count++; } +static void smbios_build_type_8_table(void) +{ + unsigned instance = 0; + struct type8_instance *t8; + + QTAILQ_FOREACH(t8, &type8, next) { + SMBIOS_BUILD_TABLE_PRE(8, T0_BASE + instance, true); + + SMBIOS_TABLE_SET_STR(8, internal_reference_str, t8->internal_reference); + SMBIOS_TABLE_SET_STR(8, external_reference_str, t8->external_reference); + /* most vendors seem to set this to None */ + t->internal_connector_type = 0x0; + t->external_connector_type = t8->connector_type; + t->port_type = t8->port_type; + + SMBIOS_BUILD_TABLE_POST; + instance++; + } +} + static void smbios_build_type_11_table(void) { char count_str[128]; @@ -1030,6 +1080,7 @@ void smbios_get_tables(MachineState *ms, smbios_build_type_4_table(ms, i); } + smbios_build_type_8_table(); 
smbios_build_type_11_table(); #define MAX_DIMM_SZ (16 * GiB) @@ -1348,6 +1399,18 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) UINT16_MAX); } return; + case 8: + if (!qemu_opts_validate(opts, qemu_smbios_type8_opts, errp)) { + return; + } + struct type8_instance *t; + t = g_new0(struct type8_instance, 1); + save_opt(&t->internal_reference, opts, "internal_reference"); + save_opt(&t->external_reference, opts, "external_reference"); + t->connector_type = qemu_opt_get_number(opts, "connector_type", 0); + t->port_type = qemu_opt_get_number(opts, "port_type", 0); + QTAILQ_INSERT_TAIL(&type8, t, next); + return; case 11: if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) { return; diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index e9ecae1f50..cbfd8c7173 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -80,3 +80,8 @@ config VHOST_USER_FS bool default y depends on VIRTIO && VHOST_USER + +config VHOST_USER_GPIO + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index 7e8877fd64..dfed1e7af5 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -29,6 +29,8 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c')) +virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) @@ -60,4 +62,6 @@ virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss) softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) +softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 20af2e7ebd..820dadc26c 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -8,6 +8,10 @@ vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, vhost_section(const char *name) "%s" vhost_reject_section(const char *name, int d) "%s:%d" vhost_iotlb_miss(void *dev, int step) "%p step %d" +vhost_dev_cleanup(void *dev) "%p" +vhost_dev_start(void *dev, const char *name) "%p:%s" +vhost_dev_stop(void *dev, const char *name) "%p:%s" + # vhost-user.c vhost_user_postcopy_end_entry(void) "" @@ -140,3 +144,8 @@ virtio_mem_state_response(uint16_t state) "state=%" PRIu16 virtio_pmem_flush_request(void) "flush request" virtio_pmem_response(void) "flush response" virtio_pmem_flush_done(int type) "fsync return=%d" + +# virtio-gpio.c +virtio_gpio_start(void) "start" +virtio_gpio_stop(void) "stop" +virtio_gpio_set_status(uint8_t status) "0x%x" diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index e513e4fdda..ad0f91c607 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -20,6 +20,7 @@ #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "qemu/error-report.h" +#include 
"hw/virtio/vhost.h" #include "hw/virtio/vhost-user-fs.h" #include "monitor/monitor.h" #include "sysemu/sysemu.h" @@ -122,13 +123,9 @@ static void vuf_stop(VirtIODevice *vdev) static void vuf_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserFS *fs = VHOST_USER_FS(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_started(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (fs->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&fs->vhost_dev) == should_start) { return; } diff --git a/hw/virtio/vhost-user-gpio-pci.c b/hw/virtio/vhost-user-gpio-pci.c new file mode 100644 index 0000000000..b3028a24a1 --- /dev/null +++ b/hw/virtio/vhost-user-gpio-pci.c @@ -0,0 +1,69 @@ +/* + * Vhost-user gpio virtio device PCI glue + * + * Copyright (c) 2022 Viresh Kumar <viresh.kumar@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-gpio.h" +#include "hw/virtio/virtio-pci.h" + +struct VHostUserGPIOPCI { + VirtIOPCIProxy parent_obj; + VHostUserGPIO vdev; +}; + +typedef struct VHostUserGPIOPCI VHostUserGPIOPCI; + +#define TYPE_VHOST_USER_GPIO_PCI "vhost-user-gpio-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserGPIOPCI, VHOST_USER_GPIO_PCI, + TYPE_VHOST_USER_GPIO_PCI) + +static void vhost_user_gpio_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->nvectors = 1; + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_gpio_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_gpio_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_user_gpio_pci_instance_init(Object *obj) +{ + VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPIO); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_gpio_pci_info = { + .base_name = TYPE_VHOST_USER_GPIO_PCI, + .non_transitional_name = "vhost-user-gpio-pci", + .instance_size = sizeof(VHostUserGPIOPCI), + .instance_init = vhost_user_gpio_pci_instance_init, + .class_init = vhost_user_gpio_pci_class_init, +}; + +static void vhost_user_gpio_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_gpio_pci_info); +} + +type_init(vhost_user_gpio_pci_register); diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c new file mode 100644 index 0000000000..8b40fe450c --- /dev/null +++ b/hw/virtio/vhost-user-gpio.c @@ -0,0 +1,411 @@ +/* + * Vhost-user GPIO virtio device + * + * Copyright (c) 2022 Viresh Kumar <viresh.kumar@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-gpio.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" +#include "trace.h" + +#define REALIZE_CONNECTION_RETRIES 3 + +/* Features required from VirtIO */ +static const int 
feature_bits[] = { + VIRTIO_F_VERSION_1, + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_GPIO_F_IRQ, + VHOST_INVALID_FEATURE_BIT +}; + +static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + memcpy(config, &gpio->config, sizeof(gpio->config)); +} + +static int vu_gpio_config_notifier(struct vhost_dev *dev) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev->vdev); + + memcpy(dev->vdev->config, &gpio->config, sizeof(gpio->config)); + virtio_notify_config(dev->vdev); + + return 0; +} + +const VhostDevConfigOps gpio_ops = { + .vhost_dev_config_notifier = vu_gpio_config_notifier, +}; + +static int vu_gpio_start(VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret, i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", ret); + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", ret); + goto err_host_notifiers; + } + + /* + * Before we start up we need to ensure we have the final feature + * set needed for the vhost configuration. The backend may also + * apply backend_features when the feature set is sent. + */ + vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features); + + ret = vhost_dev_start(&gpio->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost-user-gpio: %d", ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < gpio->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&gpio->vhost_dev, vdev, i, false); + } + + /* + * As we must have VHOST_USER_F_PROTOCOL_FEATURES (because + * VHOST_USER_GET_CONFIG requires it) we need to explicitly enable + * the vrings. + */ + g_assert(vhost_dev->vhost_ops && + vhost_dev->vhost_ops->vhost_set_vring_enable); + ret = vhost_dev->vhost_ops->vhost_set_vring_enable(vhost_dev, true); + if (ret == 0) { + return 0; + } + + error_report("Failed to start vrings for vhost-user-gpio: %d", ret); + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, gpio->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev); + + return ret; +} + +static void vu_gpio_stop(VirtIODevice *vdev) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + /* + * We can call vu_gpio_stop multiple times, for example from + * vm_state_notify and the final object finalisation. Check we + * aren't already stopped before doing so. 
+ */ + if (!vhost_dev_is_started(vhost_dev)) { + return; + } + + vhost_dev_stop(vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(vhost_dev, vdev); +} + +static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + bool should_start = virtio_device_started(vdev, status); + + trace_virtio_gpio_set_status(status); + + if (!gpio->connected) { + return; + } + + if (vhost_dev_is_started(&gpio->vhost_dev) == should_start) { + return; + } + + if (should_start) { + if (vu_gpio_start(vdev)) { + qemu_chr_fe_disconnect(&gpio->chardev); + } + } else { + vu_gpio_stop(vdev); + } +} + +static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + return vhost_get_features(&gpio->vhost_dev, feature_bits, features); +} + +static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. + */ +} + +static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + vhost_virtqueue_mask(&gpio->vhost_dev, vdev, idx, mask); +} + +static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) +{ + virtio_delete_queue(gpio->command_vq); + virtio_delete_queue(gpio->interrupt_vq); + g_free(gpio->vhost_dev.vqs); + gpio->vhost_dev.vqs = NULL; + virtio_cleanup(vdev); + vhost_user_cleanup(&gpio->vhost_user); +} + +static int vu_gpio_connect(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + if (gpio->connected) { + return 0; + } + gpio->connected = true; + + vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); + gpio->vhost_user.supports_config = true; + + ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + return ret; + } + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_gpio_start(vdev); + } + + return 0; +} + +static void vu_gpio_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + + if (!gpio->connected) { + return; + } + gpio->connected = false; + + vu_gpio_stop(vdev); + vhost_dev_cleanup(&gpio->vhost_dev); +} + +static void vu_gpio_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev); + Error *local_err = NULL; + + switch (event) { + case CHR_EVENT_OPENED: + if (vu_gpio_connect(dev, &local_err) < 0) { + qemu_chr_fe_disconnect(&gpio->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + vu_gpio_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static int vu_gpio_realize_connect(VHostUserGPIO *gpio, Error **errp) +{ + VirtIODevice *vdev = &gpio->parent_obj; + DeviceState *dev = &vdev->parent_obj; + struct vhost_dev *vhost_dev = &gpio->vhost_dev; + int ret; + + ret = qemu_chr_fe_wait_connected(&gpio->chardev, errp); + if (ret < 0) { + return ret; + } + + /* + * vu_gpio_connect() may have already connected (via the event + 
* callback) in which case it will just report success. + */ + ret = vu_gpio_connect(dev, errp); + if (ret < 0) { + qemu_chr_fe_disconnect(&gpio->chardev); + return ret; + } + g_assert(gpio->connected); + + ret = vhost_dev_get_config(vhost_dev, (uint8_t *)&gpio->config, + sizeof(gpio->config), errp); + + if (ret < 0) { + error_report("vhost-user-gpio: get config failed"); + + qemu_chr_fe_disconnect(&gpio->chardev); + vhost_dev_cleanup(vhost_dev); + return ret; + } + + return 0; +} + +static void vu_gpio_device_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); + int retries, ret; + + if (!gpio->chardev.chr) { + error_setg(errp, "vhost-user-gpio: chardev is mandatory"); + return; + } + + if (!vhost_user_init(&gpio->vhost_user, &gpio->chardev, errp)) { + return; + } + + virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); + + gpio->vhost_dev.nvqs = 2; + gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); + gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); + gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs); + + gpio->connected = false; + + qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL, + dev, NULL, true); + + retries = REALIZE_CONNECTION_RETRIES; + g_assert(!*errp); + do { + if (*errp) { + error_prepend(errp, "Reconnecting after error: "); + error_report_err(*errp); + *errp = NULL; + } + ret = vu_gpio_realize_connect(gpio, errp); + } while (ret < 0 && retries--); + + if (ret < 0) { + do_vhost_user_cleanup(vdev, gpio); + } + + return; +} + +static void vu_gpio_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserGPIO *gpio = VHOST_USER_GPIO(dev); + + vu_gpio_set_status(vdev, 0); + qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, NULL, NULL, NULL, NULL, + false); + vhost_dev_cleanup(&gpio->vhost_dev); + do_vhost_user_cleanup(vdev, gpio); +} + +static const VMStateDescription vu_gpio_vmstate = { + .name = "vhost-user-gpio", + .unmigratable = 1, +}; + +static Property vu_gpio_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserGPIO, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_gpio_properties); + dc->vmsd = &vu_gpio_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + vdc->realize = vu_gpio_device_realize; + vdc->unrealize = vu_gpio_device_unrealize; + vdc->get_features = vu_gpio_get_features; + vdc->get_config = vu_gpio_get_config; + vdc->set_status = vu_gpio_set_status; + vdc->guest_notifier_mask = vu_gpio_guest_notifier_mask; +} + +static const TypeInfo vu_gpio_info = { + .name = TYPE_VHOST_USER_GPIO, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserGPIO), + .class_init = vu_gpio_class_init, +}; + +static void vu_gpio_register_types(void) +{ + type_register_static(&vu_gpio_info); +} + +type_init(vu_gpio_register_types) diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index 6020eee093..bc58b6c0d1 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -93,13 +93,9 @@ static void vu_i2c_stop(VirtIODevice *vdev) static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = 
virtio_device_started(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (i2c->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) { return; } @@ -178,7 +174,7 @@ static void vu_i2c_disconnect(DeviceState *dev) } i2c->connected = false; - if (i2c->vhost_dev.started) { + if (vhost_dev_is_started(&i2c->vhost_dev)) { vu_i2c_stop(vdev); } } diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 3a7bf8e32d..bc1f36c5ac 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -90,13 +90,9 @@ static void vu_rng_stop(VirtIODevice *vdev) static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserRNG *rng = VHOST_USER_RNG(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_started(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (rng->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&rng->vhost_dev) == should_start) { return; } @@ -164,7 +160,7 @@ static void vu_rng_disconnect(DeviceState *dev) rng->connected = false; - if (rng->vhost_dev.started) { + if (vhost_dev_is_started(&rng->vhost_dev)) { vu_rng_stop(vdev); } } diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c index 0f8ff99f85..7b67e29d83 100644 --- a/hw/virtio/vhost-user-vsock.c +++ b/hw/virtio/vhost-user-vsock.c @@ -55,13 +55,9 @@ const VhostDevConfigOps vsock_ops = { static void vuv_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_started(vdev, status); - if (!vdev->vm_running) { - should_start = false; - } - - if (vvc->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { return; } diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 70748e61e0..03415b6c95 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -200,7 +200,7 @@ typedef struct { VhostUserRequest request; #define VHOST_USER_VERSION_MASK (0x3) -#define VHOST_USER_REPLY_MASK (0x1<<2) +#define VHOST_USER_REPLY_MASK (0x1 << 2) #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3) uint32_t flags; uint32_t size; /* the following payload size */ @@ -208,7 +208,7 @@ typedef struct { typedef union { #define VHOST_USER_VRING_IDX_MASK (0xff) -#define VHOST_USER_VRING_NOFD_MASK (0x1<<8) +#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) uint64_t u64; struct vhost_vring_state state; struct vhost_vring_addr addr; @@ -248,7 +248,8 @@ struct vhost_user { size_t region_rb_len; /* RAMBlock associated with a given region */ RAMBlock **region_rb; - /* The offset from the start of the RAMBlock to the start of the + /* + * The offset from the start of the RAMBlock to the start of the * vhost region. */ ram_addr_t *region_rb_offset; @@ -1460,7 +1461,14 @@ static int vhost_user_set_features(struct vhost_dev *dev, */ bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL); - return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features, + /* + * We need to include any extra backend only feature bits that + * might be needed by our device. Currently this includes the + * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol + * features. 
+ */ + return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, + features | dev->backend_features, log_enabled); } diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c index 7394818e00..29b9ab4f72 100644 --- a/hw/virtio/vhost-vsock-common.c +++ b/hw/virtio/vhost-vsock-common.c @@ -14,6 +14,7 @@ #include "hw/virtio/virtio-access.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" +#include "hw/virtio/vhost.h" #include "hw/virtio/vhost-vsock.h" #include "qemu/iov.h" #include "monitor/monitor.h" @@ -199,7 +200,7 @@ int vhost_vsock_common_pre_save(void *opaque) * At this point, backend must be stopped, otherwise * it might keep writing to memory. */ - assert(!vvc->vhost_dev.started); + assert(!vhost_dev_is_started(&vvc->vhost_dev)); return 0; } diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 0338de892f..7dc3c73931 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -70,14 +70,10 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start) static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + bool should_start = virtio_device_started(vdev, status); int ret; - if (!vdev->vm_running) { - should_start = false; - } - - if (vvc->vhost_dev.started == should_start) { + if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { return; } diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index f758f177bb..5185c15295 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1477,6 +1477,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev) { int i; + trace_vhost_dev_cleanup(hdev); + for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_cleanup(hdev->vqs + i); } @@ -1783,6 +1785,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) /* should only be called after backend is connected */ assert(hdev->vhost_ops); + trace_vhost_dev_start(hdev, vdev->name); + vdev->vhost_started = true; hdev->started = true; hdev->vdev = vdev; @@ -1869,6 +1873,8 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) /* should only be called after backend is connected */ assert(hdev->vhost_ops); + trace_vhost_dev_stop(hdev, vdev->name); + if (hdev->vhost_ops->vhost_dev_start) { hdev->vhost_ops->vhost_dev_start(hdev, false); } diff --git a/hw/virtio/virtio-stub.c b/hw/virtio/virtio-stub.c new file mode 100644 index 0000000000..7ddb22cc5e --- /dev/null +++ b/hw/virtio/virtio-stub.c @@ -0,0 +1,42 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-virtio.h" + +static void *qmp_virtio_unsupported(Error **errp) +{ + error_setg(errp, "Virtio is disabled"); + return NULL; +} + +VirtioInfoList *qmp_x_query_virtio(Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + return qmp_virtio_unsupported(errp); +} + +VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path, + uint16_t queue, + bool has_index, + uint16_t index, + Error **errp) +{ + return qmp_virtio_unsupported(errp); +} diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 5d607aeaa0..808446b4c9 100644 --- 
a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -13,12 +13,18 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-virtio.h" +#include "qapi/qapi-commands-qom.h" +#include "qapi/qapi-visit-virtio.h" +#include "qapi/qmp/qjson.h" #include "cpu.h" #include "trace.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" +#include "qom/object_interfaces.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" #include "qemu/atomic.h" @@ -28,6 +34,432 @@ #include "sysemu/dma.h" #include "sysemu/runstate.h" #include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/vhost_types.h" +#include "standard-headers/linux/virtio_blk.h" +#include "standard-headers/linux/virtio_console.h" +#include "standard-headers/linux/virtio_gpu.h" +#include "standard-headers/linux/virtio_net.h" +#include "standard-headers/linux/virtio_scsi.h" +#include "standard-headers/linux/virtio_i2c.h" +#include "standard-headers/linux/virtio_balloon.h" +#include "standard-headers/linux/virtio_iommu.h" +#include "standard-headers/linux/virtio_mem.h" +#include "standard-headers/linux/virtio_vsock.h" +#include CONFIG_DEVICES + +/* QAPI list of realized VirtIODevices */ +static QTAILQ_HEAD(, VirtIODevice) virtio_list; + +/* + * Maximum size of virtio device config space + */ +#define VHOST_USER_MAX_CONFIG_SIZE 256 + +#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \ + { .virtio_bit = name, .feature_desc = desc } + +enum VhostUserProtocolFeature { + VHOST_USER_PROTOCOL_F_MQ = 0, + VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1, + VHOST_USER_PROTOCOL_F_RARP = 2, + VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, + VHOST_USER_PROTOCOL_F_NET_MTU = 4, + VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, + VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, + VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, + VHOST_USER_PROTOCOL_F_CONFIG = 9, + VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, + VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, + VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14, + VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15, + VHOST_USER_PROTOCOL_F_MAX +}; + +/* Virtio transport features mapping */ +static qmp_virtio_feature_map_t virtio_transport_map[] = { + /* Virtio device transport features */ +#ifndef VIRTIO_CONFIG_NO_LEGACY + FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \ + "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. " + "descs. on VQ"), + FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \ + "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. 
layouts"), +#endif /* !VIRTIO_CONFIG_NO_LEGACY */ + FEATURE_ENTRY(VIRTIO_F_VERSION_1, \ + "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"), + FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \ + "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"), + FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \ + "VIRTIO_F_RING_PACKED: Device supports packed VQ layout"), + FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \ + "VIRTIO_F_IN_ORDER: Device uses buffers in same order as made " + "available by driver"), + FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \ + "VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"), + FEATURE_ENTRY(VIRTIO_F_SR_IOV, \ + "VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"), + /* Virtio ring transport features */ + FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \ + "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"), + FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \ + "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"), + { -1, "" } +}; + +/* Vhost-user protocol features mapping */ +static qmp_virtio_feature_map_t vhost_user_protocol_map[] = { + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \ + "VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \ + "VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \ + "VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \ + "VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \ + "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \ + "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated " + "requests supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \ + "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy " + "devices supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \ + "VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto " + "operations supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \ + "VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd " + "for accessed pages supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \ + "VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio " + "device configuration space supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \ + "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication " + "channel supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \ + "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified " + "VQs supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \ + "VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \ + "VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and " + "resetting internal device state supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \ + "VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \ + "VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for " + "memory slots supported"), + { -1, "" } +}; + +/* virtio device configuration statuses */ +static qmp_virtio_feature_map_t virtio_config_status_map[] = { + FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \ + 
"VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \ + "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \ + "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \ + "VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs " + "reset"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \ + "VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"), + FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \ + "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"), + { -1, "" } +}; + +/* virtio-blk features mapping */ +qmp_virtio_feature_map_t virtio_blk_feature_map[] = { + FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \ + "VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"), + FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \ + "VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"), + FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \ + "VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"), + FEATURE_ENTRY(VIRTIO_BLK_F_RO, \ + "VIRTIO_BLK_F_RO: Device is read-only"), + FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \ + "VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"), + FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \ + "VIRTIO_BLK_F_TOPOLOGY: Topology information available"), + FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \ + "VIRTIO_BLK_F_MQ: Multiqueue supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \ + "VIRTIO_BLK_F_DISCARD: Discard command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \ + "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"), +#ifndef VIRTIO_BLK_NO_LEGACY + FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \ + "VIRTIO_BLK_F_BARRIER: Request barriers supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \ + "VIRTIO_BLK_F_SCSI: SCSI packet commands supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \ + "VIRTIO_BLK_F_FLUSH: Flush command supported"), + FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \ + "VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes " + "supported"), +#endif /* !VIRTIO_BLK_NO_LEGACY */ + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-serial features mapping */ +qmp_virtio_feature_map_t virtio_serial_feature_map[] = { + FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \ + "VIRTIO_CONSOLE_F_SIZE: Host providing console size"), + FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \ + "VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"), + FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \ + "VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"), + { -1, "" } +}; + +/* virtio-gpu features mapping */ +qmp_virtio_feature_map_t virtio_gpu_feature_map[] = { + FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \ + "VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \ + "VIRTIO_GPU_F_EDID: EDID metadata supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \ + "VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \ + "VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"), + FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \ + "VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization " + "timelines supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: 
Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-input features mapping */ +qmp_virtio_feature_map_t virtio_input_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-net features mapping */ +qmp_virtio_feature_map_t virtio_net_feature_map[] = { + FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \ + "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \ + "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial " + "checksum supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ + "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading " + "reconfig. supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MTU, \ + "VIRTIO_NET_F_MTU: Device max MTU reporting supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MAC, \ + "VIRTIO_NET_F_MAC: Device has given MAC address"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \ + "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \ + "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \ + "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \ + "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \ + "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \ + "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \ + "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \ + "VIRTIO_NET_F_HOST_UFO: Device can receive UFO"), + FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \ + "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"), + FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \ + "VIRTIO_NET_F_STATUS: Configuration status field available"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \ + "VIRTIO_NET_F_CTRL_VQ: Control channel available"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \ + "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \ + "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \ + "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \ + "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_MQ, \ + "VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering " + "supported"), + FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \ + "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control " + "channel"), + FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \ + "VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"), + FEATURE_ENTRY(VIRTIO_NET_F_RSS, \ + "VIRTIO_NET_F_RSS: RSS RX steering supported"), + FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \ + "VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"), + FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \ + "VIRTIO_NET_F_STANDBY: Device acting as standby for primary " + "device with same MAC addr. 
supported"), + FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \ + "VIRTIO_NET_F_SPEED_DUPLEX: Device set linkspeed and duplex"), +#ifndef VIRTIO_NET_NO_LEGACY + FEATURE_ENTRY(VIRTIO_NET_F_GSO, \ + "VIRTIO_NET_F_GSO: Handling GSO-type packets supported"), +#endif /* !VIRTIO_NET_NO_LEGACY */ + FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \ + "VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX " + "packets supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-scsi features mapping */ +qmp_virtio_feature_map_t virtio_scsi_feature_map[] = { + FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \ + "VIRTIO_SCSI_F_INOUT: Requests including read and writable data " + "buffers suppoted"), + FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \ + "VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events " + "supported"), + FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \ + "VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes " + "supported"), + FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \ + "VIRTIO_SCSI_F_T10_PI: T10 info included in request header"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-user-fs features mapping */ +qmp_virtio_feature_map_t virtio_fs_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-user-i2c features mapping */ +qmp_virtio_feature_map_t virtio_i2c_feature_map[] = { + FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \ + "VIRTIO_I2C_F_ZERO_LEGNTH_REQUEST: Zero length requests supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio/vhost-vsock features mapping */ +qmp_virtio_feature_map_t virtio_vsock_feature_map[] = { + FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \ + "VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"), + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; + +/* virtio-balloon features mapping */ +qmp_virtio_feature_map_t virtio_balloon_feature_map[] = { + FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \ + "VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming " + "pages"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \ + "VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \ + "VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \ + "VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \ + "VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"), + FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \ + 
"VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"), + { -1, "" } +}; + +/* virtio-crypto features mapping */ +qmp_virtio_feature_map_t virtio_crypto_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + { -1, "" } +}; + +/* virtio-iommu features mapping */ +qmp_virtio_feature_map_t virtio_iommu_feature_map[] = { + FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \ + "VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. " + "available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \ + "VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains " + "available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \ + "VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \ + "VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in " + "bypass mode"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \ + "VIRTIO_IOMMU_F_PROBE: Probe requests available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \ + "VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"), + FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \ + "VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config " + "available"), + { -1, "" } +}; + +/* virtio-mem features mapping */ +qmp_virtio_feature_map_t virtio_mem_feature_map[] = { +#ifndef CONFIG_ACPI + FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \ + "VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"), +#endif /* !CONFIG_ACPI */ + FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \ + "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be " + "accessed"), + { -1, "" } +}; + +/* virtio-rng features mapping */ +qmp_virtio_feature_map_t virtio_rng_feature_map[] = { + FEATURE_ENTRY(VHOST_F_LOG_ALL, \ + "VHOST_F_LOG_ALL: Logging write descriptors supported"), + FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ + "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " + "negotiation supported"), + { -1, "" } +}; /* * The alignment to use between consumer and producer parts of vring. @@ -391,6 +823,19 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem)); } +/* Called within rcu_read_lock(). */ +static inline uint16_t vring_used_flags(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingUsed, flags); + + if (!caches) { + return 0; + } + + return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); +} + /* Called within rcu_read_lock(). */ static uint16_t vring_used_idx(VirtQueue *vq) { @@ -2980,6 +3425,13 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { return -EINVAL; } + + if (val & (1ull << VIRTIO_F_BAD_FEATURE)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n", + __func__, vdev->name); + } + ret = virtio_set_features_nocheck(vdev, val); if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. 
*/ @@ -2999,11 +3451,12 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) return ret; } -size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes, - uint64_t host_features) +size_t virtio_get_config_size(const VirtIOConfigSizeParams *params, + uint64_t host_features) { - size_t config_size = 0; - int i; + size_t config_size = params->min_size; + const VirtIOFeature *feature_sizes = params->feature_sizes; + size_t i; for (i = 0; feature_sizes[i].flags != 0; i++) { if (host_features & feature_sizes[i].flags) { @@ -3011,6 +3464,7 @@ size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes, } } + assert(config_size <= params->max_size); return config_size; } @@ -3698,6 +4152,7 @@ static void virtio_device_realize(DeviceState *dev, Error **errp) vdev->listener.commit = virtio_memory_listener_commit; vdev->listener.name = "virtio"; memory_listener_register(&vdev->listener, vdev->dma_as); + QTAILQ_INSERT_TAIL(&virtio_list, vdev, next); } static void virtio_device_unrealize(DeviceState *dev) @@ -3712,6 +4167,7 @@ static void virtio_device_unrealize(DeviceState *dev) vdc->unrealize(dev); } + QTAILQ_REMOVE(&virtio_list, vdev, next); g_free(vdev->bus_name); vdev->bus_name = NULL; } @@ -3885,6 +4341,8 @@ static void virtio_device_class_init(ObjectClass *klass, void *data) vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; + + QTAILQ_INIT(&virtio_list); } bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) @@ -3895,6 +4353,589 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) return virtio_bus_ioeventfd_enabled(vbus); } +VirtioInfoList *qmp_x_query_virtio(Error **errp) +{ + VirtioInfoList *list = NULL; + VirtioInfoList *node; + VirtIODevice *vdev; + + QTAILQ_FOREACH(vdev, &virtio_list, next) { + DeviceState *dev = DEVICE(vdev); + Error *err = NULL; + QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err); + + if (err == NULL) { + GString *is_realized = qobject_to_json_pretty(obj, true); + /* virtio device is NOT realized, remove it from list */ + if (!strncmp(is_realized->str, "false", 4)) { + QTAILQ_REMOVE(&virtio_list, vdev, next); + } else { + node = g_new0(VirtioInfoList, 1); + node->value = g_new(VirtioInfo, 1); + node->value->path = g_strdup(dev->canonical_path); + node->value->name = g_strdup(vdev->name); + QAPI_LIST_PREPEND(list, node->value); + } + g_string_free(is_realized, true); + } + qobject_unref(obj); + } + + return list; +} + +static VirtIODevice *virtio_device_find(const char *path) +{ + VirtIODevice *vdev; + + QTAILQ_FOREACH(vdev, &virtio_list, next) { + DeviceState *dev = DEVICE(vdev); + + if (strcmp(dev->canonical_path, path) != 0) { + continue; + } + + Error *err = NULL; + QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err); + if (err == NULL) { + GString *is_realized = qobject_to_json_pretty(obj, true); + /* virtio device is NOT realized, remove it from list */ + if (!strncmp(is_realized->str, "false", 4)) { + g_string_free(is_realized, true); + qobject_unref(obj); + QTAILQ_REMOVE(&virtio_list, vdev, next); + return NULL; + } + g_string_free(is_realized, true); + } else { + /* virtio device doesn't exist in QOM tree */ + QTAILQ_REMOVE(&virtio_list, vdev, next); + qobject_unref(obj); + return NULL; + } + /* device exists in QOM tree & is realized */ + qobject_unref(obj); + return vdev; + } + return NULL; +} + +#define CONVERT_FEATURES(type, map, is_status, bitmap) \ + ({ \ + type *list = NULL; \ + type *node; \ + for (i = 0; map[i].virtio_bit != 
-1; i++) { \ + if (is_status) { \ + bit = map[i].virtio_bit; \ + } \ + else { \ + bit = 1ULL << map[i].virtio_bit; \ + } \ + if ((bitmap & bit) == 0) { \ + continue; \ + } \ + node = g_new0(type, 1); \ + node->value = g_strdup(map[i].feature_desc); \ + node->next = list; \ + list = node; \ + bitmap ^= bit; \ + } \ + list; \ + }) + +static VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap) +{ + VirtioDeviceStatus *status; + uint8_t bit; + int i; + + status = g_new0(VirtioDeviceStatus, 1); + status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map, + 1, bitmap); + status->has_unknown_statuses = bitmap != 0; + if (status->has_unknown_statuses) { + status->unknown_statuses = bitmap; + } + + return status; +} + +static VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap) +{ + VhostDeviceProtocols *vhu_protocols; + uint64_t bit; + int i; + + vhu_protocols = g_new0(VhostDeviceProtocols, 1); + vhu_protocols->protocols = + CONVERT_FEATURES(strList, + vhost_user_protocol_map, 0, bitmap); + vhu_protocols->has_unknown_protocols = bitmap != 0; + if (vhu_protocols->has_unknown_protocols) { + vhu_protocols->unknown_protocols = bitmap; + } + + return vhu_protocols; +} + +static VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, + uint64_t bitmap) +{ + VirtioDeviceFeatures *features; + uint64_t bit; + int i; + + features = g_new0(VirtioDeviceFeatures, 1); + features->has_dev_features = true; + + /* transport features */ + features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0, + bitmap); + + /* device features */ + switch (device_id) { +#ifdef CONFIG_VIRTIO_SERIAL + case VIRTIO_ID_CONSOLE: + features->dev_features = + CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_BLK + case VIRTIO_ID_BLOCK: + features->dev_features = + CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_GPU + case VIRTIO_ID_GPU: + features->dev_features = + CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_NET + case VIRTIO_ID_NET: + features->dev_features = + CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_SCSI + case VIRTIO_ID_SCSI: + features->dev_features = + CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_BALLOON + case VIRTIO_ID_BALLOON: + features->dev_features = + CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_IOMMU + case VIRTIO_ID_IOMMU: + features->dev_features = + CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_INPUT + case VIRTIO_ID_INPUT: + features->dev_features = + CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VHOST_USER_FS + case VIRTIO_ID_FS: + features->dev_features = + CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VHOST_VSOCK + case VIRTIO_ID_VSOCK: + features->dev_features = + CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_CRYPTO + case VIRTIO_ID_CRYPTO: + features->dev_features = + CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_MEM + case VIRTIO_ID_MEM: + features->dev_features = + CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap); + break; +#endif +#ifdef 
CONFIG_VIRTIO_I2C_ADAPTER + case VIRTIO_ID_I2C_ADAPTER: + features->dev_features = + CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap); + break; +#endif +#ifdef CONFIG_VIRTIO_RNG + case VIRTIO_ID_RNG: + features->dev_features = + CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap); + break; +#endif + /* No features */ + case VIRTIO_ID_9P: + case VIRTIO_ID_PMEM: + case VIRTIO_ID_IOMEM: + case VIRTIO_ID_RPMSG: + case VIRTIO_ID_CLOCK: + case VIRTIO_ID_MAC80211_WLAN: + case VIRTIO_ID_MAC80211_HWSIM: + case VIRTIO_ID_RPROC_SERIAL: + case VIRTIO_ID_MEMORY_BALLOON: + case VIRTIO_ID_CAIF: + case VIRTIO_ID_SIGNAL_DIST: + case VIRTIO_ID_PSTORE: + case VIRTIO_ID_SOUND: + case VIRTIO_ID_BT: + case VIRTIO_ID_RPMB: + case VIRTIO_ID_VIDEO_ENCODER: + case VIRTIO_ID_VIDEO_DECODER: + case VIRTIO_ID_SCMI: + case VIRTIO_ID_NITRO_SEC_MOD: + case VIRTIO_ID_WATCHDOG: + case VIRTIO_ID_CAN: + case VIRTIO_ID_DMABUF: + case VIRTIO_ID_PARAM_SERV: + case VIRTIO_ID_AUDIO_POLICY: + case VIRTIO_ID_GPIO: + break; + default: + g_assert_not_reached(); + } + + features->has_unknown_dev_features = bitmap != 0; + if (features->has_unknown_dev_features) { + features->unknown_dev_features = bitmap; + } + + return features; +} + +VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) +{ + VirtIODevice *vdev; + VirtioStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + status = g_new0(VirtioStatus, 1); + status->name = g_strdup(vdev->name); + status->device_id = vdev->device_id; + status->vhost_started = vdev->vhost_started; + status->guest_features = qmp_decode_features(vdev->device_id, + vdev->guest_features); + status->host_features = qmp_decode_features(vdev->device_id, + vdev->host_features); + status->backend_features = qmp_decode_features(vdev->device_id, + vdev->backend_features); + + switch (vdev->device_endian) { + case VIRTIO_DEVICE_ENDIAN_LITTLE: + status->device_endian = g_strdup("little"); + break; + case VIRTIO_DEVICE_ENDIAN_BIG: + status->device_endian = g_strdup("big"); + break; + default: + status->device_endian = g_strdup("unknown"); + break; + } + + status->num_vqs = virtio_get_num_queues(vdev); + status->status = qmp_decode_status(vdev->status); + status->isr = vdev->isr; + status->queue_sel = vdev->queue_sel; + status->vm_running = vdev->vm_running; + status->broken = vdev->broken; + status->disabled = vdev->disabled; + status->use_started = vdev->use_started; + status->started = vdev->started; + status->start_on_kick = vdev->start_on_kick; + status->disable_legacy_check = vdev->disable_legacy_check; + status->bus_name = g_strdup(vdev->bus_name); + status->use_guest_notifier_mask = vdev->use_guest_notifier_mask; + status->has_vhost_dev = vdev->vhost_started; + + if (vdev->vhost_started) { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + status->vhost_dev = g_new0(VhostStatus, 1); + status->vhost_dev->n_mem_sections = hdev->n_mem_sections; + status->vhost_dev->n_tmp_sections = hdev->n_tmp_sections; + status->vhost_dev->nvqs = hdev->nvqs; + status->vhost_dev->vq_index = hdev->vq_index; + status->vhost_dev->features = + qmp_decode_features(vdev->device_id, hdev->features); + status->vhost_dev->acked_features = + qmp_decode_features(vdev->device_id, hdev->acked_features); + status->vhost_dev->backend_features = + qmp_decode_features(vdev->device_id, hdev->backend_features); + status->vhost_dev->protocol_features = 
+ qmp_decode_protocols(hdev->protocol_features); + status->vhost_dev->max_queues = hdev->max_queues; + status->vhost_dev->backend_cap = hdev->backend_cap; + status->vhost_dev->log_enabled = hdev->log_enabled; + status->vhost_dev->log_size = hdev->log_size; + } + + return status; +} + +VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + VirtIODevice *vdev; + VirtVhostQueueStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + if (!vdev->vhost_started) { + error_setg(errp, "Error: vhost device has not started yet"); + return NULL; + } + + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + if (queue < hdev->vq_index || queue >= hdev->vq_index + hdev->nvqs) { + error_setg(errp, "Invalid vhost virtqueue number %d", queue); + return NULL; + } + + status = g_new0(VirtVhostQueueStatus, 1); + status->name = g_strdup(vdev->name); + status->kick = hdev->vqs[queue].kick; + status->call = hdev->vqs[queue].call; + status->desc = (uintptr_t)hdev->vqs[queue].desc; + status->avail = (uintptr_t)hdev->vqs[queue].avail; + status->used = (uintptr_t)hdev->vqs[queue].used; + status->num = hdev->vqs[queue].num; + status->desc_phys = hdev->vqs[queue].desc_phys; + status->desc_size = hdev->vqs[queue].desc_size; + status->avail_phys = hdev->vqs[queue].avail_phys; + status->avail_size = hdev->vqs[queue].avail_size; + status->used_phys = hdev->vqs[queue].used_phys; + status->used_size = hdev->vqs[queue].used_size; + + return status; +} + +VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path, + uint16_t queue, + Error **errp) +{ + VirtIODevice *vdev; + VirtQueueStatus *status; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIODevice", path); + return NULL; + } + + if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { + error_setg(errp, "Invalid virtqueue number %d", queue); + return NULL; + } + + status = g_new0(VirtQueueStatus, 1); + status->name = g_strdup(vdev->name); + status->queue_index = vdev->vq[queue].queue_index; + status->inuse = vdev->vq[queue].inuse; + status->vring_num = vdev->vq[queue].vring.num; + status->vring_num_default = vdev->vq[queue].vring.num_default; + status->vring_align = vdev->vq[queue].vring.align; + status->vring_desc = vdev->vq[queue].vring.desc; + status->vring_avail = vdev->vq[queue].vring.avail; + status->vring_used = vdev->vq[queue].vring.used; + status->used_idx = vdev->vq[queue].used_idx; + status->signalled_used = vdev->vq[queue].signalled_used; + status->signalled_used_valid = vdev->vq[queue].signalled_used_valid; + + if (vdev->vhost_started) { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + struct vhost_dev *hdev = vdc->get_vhost(vdev); + + /* check if vq index exists for vhost as well */ + if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) { + status->has_last_avail_idx = true; + + int vhost_vq_index = + hdev->vhost_ops->vhost_get_vq_index(hdev, queue); + struct vhost_vring_state state = { + .index = vhost_vq_index, + }; + + status->last_avail_idx = + hdev->vhost_ops->vhost_get_vring_base(hdev, &state); + } + } else { + status->has_shadow_avail_idx = true; + status->has_last_avail_idx = true; + status->last_avail_idx = vdev->vq[queue].last_avail_idx; + status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx; + } + + return status; +} + +static 
strList *qmp_decode_vring_desc_flags(uint16_t flags) +{ + strList *list = NULL; + strList *node; + int i; + + struct { + uint16_t flag; + const char *value; + } map[] = { + { VRING_DESC_F_NEXT, "next" }, + { VRING_DESC_F_WRITE, "write" }, + { VRING_DESC_F_INDIRECT, "indirect" }, + { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" }, + { 1 << VRING_PACKED_DESC_F_USED, "used" }, + { 0, "" } + }; + + for (i = 0; map[i].flag; i++) { + if ((map[i].flag & flags) == 0) { + continue; + } + node = g_malloc0(sizeof(strList)); + node->value = g_strdup(map[i].value); + node->next = list; + list = node; + } + + return list; +} + +VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path, + uint16_t queue, + bool has_index, + uint16_t index, + Error **errp) +{ + VirtIODevice *vdev; + VirtQueue *vq; + VirtioQueueElement *element = NULL; + + vdev = virtio_device_find(path); + if (vdev == NULL) { + error_setg(errp, "Path %s is not a VirtIO device", path); + return NULL; + } + + if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) { + error_setg(errp, "Invalid virtqueue number %d", queue); + return NULL; + } + vq = &vdev->vq[queue]; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + error_setg(errp, "Packed ring not supported"); + return NULL; + } else { + unsigned int head, i, max; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + MemoryRegionCache *desc_cache; + VRingDesc desc; + VirtioRingDescList *list = NULL; + VirtioRingDescList *node; + int rc; int ndescs; + + RCU_READ_LOCK_GUARD(); + + max = vq->vring.num; + + if (!has_index) { + head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num); + } else { + head = vring_avail_ring(vq, index % vq->vring.num); + } + i = head; + + caches = vring_get_region_caches(vq); + if (!caches) { + error_setg(errp, "Region caches not initialized"); + return NULL; + } + if (caches->desc.len < max * sizeof(VRingDesc)) { + error_setg(errp, "Cannot map descriptor ring"); + return NULL; + } + + desc_cache = &caches->desc; + vring_split_desc_read(vdev, &desc, desc_cache, i); + if (desc.flags & VRING_DESC_F_INDIRECT) { + int64_t len; + len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + error_setg(errp, "Cannot map indirect buffer"); + goto done; + } + + max = desc.len / sizeof(VRingDesc); + i = 0; + vring_split_desc_read(vdev, &desc, desc_cache, i); + } + + element = g_new0(VirtioQueueElement, 1); + element->avail = g_new0(VirtioRingAvail, 1); + element->used = g_new0(VirtioRingUsed, 1); + element->name = g_strdup(vdev->name); + element->index = head; + element->avail->flags = vring_avail_flags(vq); + element->avail->idx = vring_avail_idx(vq); + element->avail->ring = head; + element->used->flags = vring_used_flags(vq); + element->used->idx = vring_used_idx(vq); + ndescs = 0; + + do { + /* A buggy driver may produce an infinite loop */ + if (ndescs >= max) { + break; + } + node = g_new0(VirtioRingDescList, 1); + node->value = g_new0(VirtioRingDesc, 1); + node->value->addr = desc.addr; + node->value->len = desc.len; + node->value->flags = qmp_decode_vring_desc_flags(desc.flags); + node->next = list; + list = node; + + ndescs++; + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, + max, &i); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + element->descs = list; +done: + address_space_cache_destroy(&indirect_desc_cache); + } + + return element; +} + static const 
TypeInfo virtio_device_info = { .name = TYPE_VIRTIO_DEVICE, .parent = TYPE_DEVICE,
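
Note on the hunks above: the new qmp_x_query_virtio*() handlers back a set of QMP introspection commands (by QAPI naming convention, x-query-virtio, x-query-virtio-status, x-query-virtio-queue-status, x-query-virtio-vhost-queue-status and x-query-virtio-queue-element) that walk the list of realized virtio devices and translate feature and status bitmaps into the human-readable strings held in the feature maps. The stand-alone sketch below is illustrative only and is not code from the patch; the names demo_feature_map_t, demo_map and decode_features are invented for the example. It shows how a table of (bit number, description) pairs terminated by a -1 entry can be walked to decode a feature bitmap and to report leftover unknown bits, which is essentially what the patch's CONVERT_FEATURES() macro does for each strList it builds.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Mimics the decode step used by the QMP virtio introspection code:
 * walk a -1 terminated table of (bit, description) pairs, print the
 * description for every bit present in the bitmap, and return the
 * bits that were not recognised.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int virtio_bit;              /* bit position; -1 terminates the table */
    const char *feature_desc;
} demo_feature_map_t;

static const demo_feature_map_t demo_map[] = {
    { 32, "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)" },
    { 34, "VIRTIO_F_RING_PACKED: Device supports packed VQ layout" },
    { -1, "" }
};

static uint64_t decode_features(uint64_t bitmap, const demo_feature_map_t *map)
{
    for (int i = 0; map[i].virtio_bit != -1; i++) {
        uint64_t bit = 1ULL << map[i].virtio_bit;

        if (bitmap & bit) {
            printf("  %s\n", map[i].feature_desc);
            bitmap &= ~bit;      /* clear recognised bits */
        }
    }
    return bitmap;               /* non-zero: unknown feature bits remain */
}

int main(void)
{
    uint64_t features = (1ULL << 32) | (1ULL << 34) | (1ULL << 40);
    uint64_t unknown = decode_features(features, demo_map);

    if (unknown) {
        printf("  unknown feature bits: 0x%llx\n",
               (unsigned long long)unknown);
    }
    return 0;
}

Compiled on its own, the sketch prints the two known descriptions and reports bit 40 as unknown, mirroring how the QMP code above sets unknown-dev-features when bits are still set after decoding.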