Diffstat (limited to 'hw')
-rw-r--r--  hw/block/vhost-user-blk.c       4
-rw-r--r--  hw/char/virtio-serial-bus.c     7
-rw-r--r--  hw/dma/i82374.c                 9
-rw-r--r--  hw/i386/kvm/clock.c            14
-rw-r--r--  hw/s390x/ipl.c                112
-rw-r--r--  hw/s390x/s390-virtio-ccw.c      5
-rw-r--r--  hw/scsi/scsi-disk.c            39
-rw-r--r--  hw/scsi/scsi-generic.c         48
-rw-r--r--  hw/vfio/ccw.c                   2
-rw-r--r--  hw/vfio/common.c               11
-rw-r--r--  hw/vfio/trace-events            1
-rw-r--r--  hw/virtio/vhost-user.c         22
-rw-r--r--  hw/virtio/vhost.c              16
13 files changed, 220 insertions, 70 deletions
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index f840f07dfe..262baca432 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -259,6 +259,8 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
s->dev.vq_index = 0;
s->dev.backend_features = 0;
+ vhost_dev_set_config_notifier(&s->dev, &blk_ops);
+
ret = vhost_dev_init(&s->dev, &s->chardev, VHOST_BACKEND_TYPE_USER, 0);
if (ret < 0) {
error_setg(errp, "vhost-user-blk: vhost initialization failed: %s",
@@ -277,8 +279,6 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
s->blkcfg.num_queues = s->num_queues;
}
- vhost_dev_set_config_notifier(&s->dev, &blk_ops);
-
return;
vhost_err:
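
This hunk only reorders existing calls: the config notifier has to be registered before vhost_dev_init(), because the vhost-user negotiation added later in this series (see the hw/virtio/vhost-user.c hunks below) inspects dev->config_ops to decide whether to keep the new VHOST_USER_PROTOCOL_F_CONFIG protocol feature. A minimal sketch of that dependency, using only code that appears elsewhere in this diff:

    /* Consumer side, from hw/virtio/vhost-user.c below: if no notifier was
     * registered before vhost_dev_init() runs, the CONFIG feature is dropped. */
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
    }
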
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 9470bd7be7..d2dd8ab502 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -580,13 +580,16 @@ static void set_config(VirtIODevice *vdev, const uint8_t *config_data)
VirtIOSerial *vser = VIRTIO_SERIAL(vdev);
struct virtio_console_config *config =
(struct virtio_console_config *)config_data;
- uint8_t emerg_wr_lo = le32_to_cpu(config->emerg_wr);
VirtIOSerialPort *port = find_first_connected_console(vser);
VirtIOSerialPortClass *vsc;
+ uint8_t emerg_wr_lo;
- if (!config->emerg_wr) {
+ if (!virtio_has_feature(vser->host_features,
+ VIRTIO_CONSOLE_F_EMERG_WRITE) || !config->emerg_wr) {
return;
}
+
+ emerg_wr_lo = le32_to_cpu(config->emerg_wr);
/* Make sure we don't misdetect an emergency write when the guest
* does a short config write after an emergency write. */
config->emerg_wr = 0;
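
The reordering in this hunk is the point of the fix: config->emerg_wr must not be decoded until VIRTIO_CONSOLE_F_EMERG_WRITE is known to be offered, since the field is only meaningful when that feature is present. A compact sketch of the resulting guard, restricted to identifiers already used in this hunk:

    /* Decode emerg_wr only when the feature was offered and the value is set. */
    if (!virtio_has_feature(vser->host_features,
                            VIRTIO_CONSOLE_F_EMERG_WRITE) ||
        !config->emerg_wr) {
        return;
    }
    emerg_wr_lo = le32_to_cpu(config->emerg_wr);   /* now safe to read */
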
diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c
index 83c87d92e0..892f655a7e 100644
--- a/hw/dma/i82374.c
+++ b/hw/dma/i82374.c
@@ -23,6 +23,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/isa/isa.h"
#include "hw/dma/i8257.h"
@@ -118,13 +119,19 @@ static const MemoryRegionPortio i82374_portio_list[] = {
static void i82374_realize(DeviceState *dev, Error **errp)
{
I82374State *s = I82374(dev);
+ ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev));
+
+ if (isa_get_dma(isa_bus, 0)) {
+ error_setg(errp, "DMA already initialized on ISA bus");
+ return;
+ }
+ i8257_dma_init(isa_bus, true);
portio_list_init(&s->port_list, OBJECT(s), i82374_portio_list, s,
"i82374");
portio_list_add(&s->port_list, isa_address_space_io(&s->parent_obj),
s->iobase);
- i8257_dma_init(isa_bus_from_device(ISA_DEVICE(dev)), true);
memset(s->commands, 0, sizeof(s->commands));
}
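
With this change, instantiating an i82374 on a bus whose DMA channels already have an owner becomes a clean, reportable realize() failure, and the i8257 is now created before the port list is registered so the check runs ahead of any other side effects. A minimal sketch of the check-then-init shape, using the identifiers from the hunk:

    ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev));

    if (isa_get_dma(isa_bus, 0)) {        /* channel 0 already has a controller */
        error_setg(errp, "DMA already initialized on ISA bus");
        return;                           /* fail before any other side effects */
    }
    i8257_dma_init(isa_bus, true);        /* safe to create the controller now */
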
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index 1707434db3..7dac319403 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -242,6 +242,19 @@ static const VMStateDescription kvmclock_reliable_get_clock = {
};
/*
+ * When migrating, assume the source has an unreliable
+ * KVM_GET_CLOCK unless told otherwise.
+ */
+static int kvmclock_pre_load(void *opaque)
+{
+ KVMClockState *s = opaque;
+
+ s->clock_is_reliable = false;
+
+ return 0;
+}
+
+/*
* When migrating, read the clock just before migration,
* so that the guest clock counts during the events
* between:
@@ -268,6 +281,7 @@ static const VMStateDescription kvmclock_vmsd = {
.name = "kvmclock",
.version_id = 1,
.minimum_version_id = 1,
+ .pre_load = kvmclock_pre_load,
.pre_save = kvmclock_pre_save,
.fields = (VMStateField[]) {
VMSTATE_UINT64(clock, KVMClockState),
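
The new hook only matters on the destination: pre_load() pessimistically marks the clock as unreliable, and the optional "kvmclock_reliable_get_clock" subsection (declared just above this hunk) overrides that when the source actually sent the flag, so streams from older QEMUs that never send the subsection land on the safe default. A sketch of where the hook sits in the device's VMStateDescription, assuming the subsection wiring itself is unchanged:

    static const VMStateDescription kvmclock_vmsd = {
        .name = "kvmclock",
        .version_id = 1,
        .minimum_version_id = 1,
        .pre_load = kvmclock_pre_load,   /* destination: assume unreliable */
        .pre_save = kvmclock_pre_save,   /* source: sample the clock late  */
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(clock, KVMClockState),
            VMSTATE_END_OF_LIST()
        },
        /* .subsections carries kvmclock_reliable_get_clock, which restores
         * clock_is_reliable when the source provided it (not shown here). */
    };
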
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index fdeaec3a58..fb554ab156 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -279,44 +279,52 @@ static void s390_ipl_set_boot_menu(S390IPLState *ipl)
*timeout = cpu_to_be32(splash_time);
}
+static CcwDevice *s390_get_ccw_device(DeviceState *dev_st)
+{
+ CcwDevice *ccw_dev = NULL;
+
+ if (dev_st) {
+ VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *)
+ object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent),
+ TYPE_VIRTIO_CCW_DEVICE);
+ if (virtio_ccw_dev) {
+ ccw_dev = CCW_DEVICE(virtio_ccw_dev);
+ } else {
+ SCSIDevice *sd = (SCSIDevice *)
+ object_dynamic_cast(OBJECT(dev_st),
+ TYPE_SCSI_DEVICE);
+ if (sd) {
+ SCSIBus *bus = scsi_bus_from_device(sd);
+ VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus);
+ VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw,
+ vdev);
+
+ ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw),
+ TYPE_CCW_DEVICE);
+ }
+ }
+ }
+ return ccw_dev;
+}
+
static bool s390_gen_initial_iplb(S390IPLState *ipl)
{
DeviceState *dev_st;
+ CcwDevice *ccw_dev = NULL;
dev_st = get_boot_device(0);
if (dev_st) {
- VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *)
- object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent),
- TYPE_VIRTIO_CCW_DEVICE);
+ ccw_dev = s390_get_ccw_device(dev_st);
+ }
+
+ /*
+ * Currently allow IPL only from CCW devices.
+ */
+ if (ccw_dev) {
SCSIDevice *sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st),
TYPE_SCSI_DEVICE);
- VirtIONet *vn = (VirtIONet *) object_dynamic_cast(OBJECT(dev_st),
- TYPE_VIRTIO_NET);
-
- if (vn) {
- ipl->netboot = true;
- }
- if (virtio_ccw_dev) {
- CcwDevice *ccw_dev = CCW_DEVICE(virtio_ccw_dev);
-
- ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
- ipl->iplb.blk0_len =
- cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN);
- ipl->iplb.pbt = S390_IPL_TYPE_CCW;
- ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
- ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
- } else if (sd) {
- SCSIBus *bus = scsi_bus_from_device(sd);
- VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus);
- VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw, vdev);
- CcwDevice *ccw_dev;
-
- ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw),
- TYPE_CCW_DEVICE);
- if (!ccw_dev) { /* It might be a PCI device instead */
- return false;
- }
+ if (sd) {
ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN);
ipl->iplb.blk0_len =
cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN);
@@ -327,12 +335,25 @@ static bool s390_gen_initial_iplb(S390IPLState *ipl)
ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno);
ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3;
} else {
- return false; /* unknown device */
+ VirtIONet *vn = (VirtIONet *) object_dynamic_cast(OBJECT(dev_st),
+ TYPE_VIRTIO_NET);
+
+ ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
+ ipl->iplb.blk0_len =
+ cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN);
+ ipl->iplb.pbt = S390_IPL_TYPE_CCW;
+ ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
+ ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
+
+ if (vn) {
+ ipl->netboot = true;
+ }
}
if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) {
ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID;
}
+
return true;
}
@@ -406,7 +427,8 @@ unref_mr:
return img_size;
}
-static bool is_virtio_net_device(IplParameterBlock *iplb)
+static bool is_virtio_ccw_device_of_type(IplParameterBlock *iplb,
+ int virtio_id)
{
uint8_t cssid;
uint8_t ssid;
@@ -426,13 +448,23 @@ static bool is_virtio_net_device(IplParameterBlock *iplb)
sch = css_find_subch(1, cssid, ssid, schid);
if (sch && sch->devno == devno) {
- return sch->id.cu_model == VIRTIO_ID_NET;
+ return sch->id.cu_model == virtio_id;
}
}
}
return false;
}
+static bool is_virtio_net_device(IplParameterBlock *iplb)
+{
+ return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_NET);
+}
+
+static bool is_virtio_scsi_device(IplParameterBlock *iplb)
+{
+ return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_SCSI);
+}
+
void s390_ipl_update_diag308(IplParameterBlock *iplb)
{
S390IPLState *ipl = get_ipl_device();
@@ -457,6 +489,22 @@ void s390_reipl_request(void)
S390IPLState *ipl = get_ipl_device();
ipl->reipl_requested = true;
+ if (ipl->iplb_valid &&
+ !ipl->netboot &&
+ ipl->iplb.pbt == S390_IPL_TYPE_CCW &&
+ is_virtio_scsi_device(&ipl->iplb)) {
+ CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0));
+
+ if (ccw_dev &&
+ cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno &&
+ (ccw_dev->sch->ssid & 3) == ipl->iplb.ccw.ssid) {
+ /*
+ * this is the original boot device's SCSI
+ * so restore IPL parameter info from it
+ */
+ ipl->iplb_valid = s390_gen_initial_iplb(ipl);
+ }
+ }
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}
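
Two things are factored out here: s390_get_ccw_device() centralizes the object-model walk from a boot DeviceState to its CcwDevice (whether it is a plain virtio-ccw device or a SCSI disk hanging off virtio-scsi-ccw), and s390_reipl_request() uses it to regenerate the IPL parameter block when the guest re-IPLs from the original SCSI boot device. A condensed sketch of the match performed on re-IPL, built only from identifiers in the hunks above (note that iplb.ccw.devno is stored big-endian, hence the cpu_to_be16() in the comparison):

    CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0));

    if (ccw_dev &&
        cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno &&
        (ccw_dev->sch->ssid & 3) == ipl->iplb.ccw.ssid) {
        /* same subchannel as the original boot device: rebuild the IPLB */
        ipl->iplb_valid = s390_gen_initial_iplb(ipl);
    }
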
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 864145a7c6..435f7c99e7 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -246,6 +246,7 @@ static void s390_init_ipl_dev(const char *kernel_filename,
{
Object *new = object_new(TYPE_S390_IPL);
DeviceState *dev = DEVICE(new);
+ char *netboot_fw_prop;
if (kernel_filename) {
qdev_prop_set_string(dev, "kernel", kernel_filename);
@@ -256,9 +257,11 @@ static void s390_init_ipl_dev(const char *kernel_filename,
qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
qdev_prop_set_string(dev, "firmware", firmware);
qdev_prop_set_bit(dev, "enforce_bios", enforce_bios);
- if (!strlen(object_property_get_str(new, "netboot_fw", &error_abort))) {
+ netboot_fw_prop = object_property_get_str(new, "netboot_fw", &error_abort);
+ if (!strlen(netboot_fw_prop)) {
qdev_prop_set_string(dev, "netboot_fw", netboot_fw);
}
+ g_free(netboot_fw_prop);
object_property_add_child(qdev_get_machine(), TYPE_S390_IPL,
new, NULL);
object_unref(new);
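
This hunk fixes a small leak: object_property_get_str() returns a caller-owned copy of the string, so it must be freed after the emptiness check. The general shape, with the object and property name as placeholders:

    /* Sketch: QOM string getters hand back a heap copy owned by the caller. */
    char *val = object_property_get_str(obj, "some-prop", &error_abort);
    if (!strlen(val)) {
        /* fall back to a default value here */
    }
    g_free(val);
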
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index f5ab767ab5..ded23d36ca 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -714,10 +714,12 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
/* min_io_size and opt_io_size can't be greater than
* max_io_sectors */
- min_io_size =
- MIN_NON_ZERO(min_io_size, max_io_sectors);
- opt_io_size =
- MIN_NON_ZERO(opt_io_size, max_io_sectors);
+ if (min_io_size) {
+ min_io_size = MIN(min_io_size, max_io_sectors);
+ }
+ if (opt_io_size) {
+ opt_io_size = MIN(opt_io_size, max_io_sectors);
+ }
}
/* required VPD size with unmap support */
buflen = 0x40;
@@ -823,7 +825,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
* block characteristics VPD page by default. Not all of SPC-3
* is actually implemented, but we're good enough.
*/
- outbuf[2] = 5;
+ outbuf[2] = s->qdev.default_scsi_version;
outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
if (buflen > 36) {
@@ -2191,7 +2193,11 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
case READ_12:
case READ_16:
DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
- if (r->req.cmd.buf[1] & 0xe0) {
+ /* Protection information is not supported. For SCSI versions 2 and
+ * older (as determined by snooping the guest's INQUIRY commands),
+ * there is no RD/WR/VRPROTECT, so skip this check in these versions.
+ */
+ if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
goto illegal_request;
}
if (!check_lba_range(s, r->req.cmd.lba, len)) {
@@ -2222,7 +2228,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
* As far as DMA is concerned, we can treat it the same as a write;
* scsi_block_do_sgio will send VERIFY commands.
*/
- if (r->req.cmd.buf[1] & 0xe0) {
+ if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
goto illegal_request;
}
if (!check_lba_range(s, r->req.cmd.lba, len)) {
@@ -2268,6 +2274,8 @@ static void scsi_disk_reset(DeviceState *dev)
/* reset tray statuses */
s->tray_locked = 0;
s->tray_open = 0;
+
+ s->qdev.scsi_version = s->qdev.default_scsi_version;
}
static void scsi_disk_resize_cb(void *opaque)
@@ -2812,6 +2820,8 @@ static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
SCSIBlockReq *r = (SCSIBlockReq *)req;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
+
r->cmd = req->cmd.buf[0];
switch (r->cmd >> 5) {
case 0:
@@ -2837,8 +2847,11 @@ static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
abort();
}
- if (r->cdb1 & 0xe0) {
- /* Protection information is not supported. */
+ /* Protection information is not supported. For SCSI versions 2 and
+ * older (as determined by snooping the guest's INQUIRY commands),
+ * there is no RD/WR/VRPROTECT, so skip this check in these versions.
+ */
+ if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
return 0;
}
@@ -2950,6 +2963,8 @@ static Property scsi_hd_properties[] = {
DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
DEFAULT_MAX_IO_SIZE),
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
+ DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
+ 5),
DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2995,6 +3010,8 @@ static Property scsi_cd_properties[] = {
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
DEFAULT_MAX_IO_SIZE),
+ DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
+ 5),
DEFINE_PROP_END_OF_LIST(),
};
@@ -3023,6 +3040,8 @@ static Property scsi_block_properties[] = {
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
+ DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
+ -1),
DEFINE_PROP_END_OF_LIST(),
};
@@ -3063,6 +3082,8 @@ static Property scsi_disk_properties[] = {
DEFAULT_MAX_UNMAP_SIZE),
DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
DEFAULT_MAX_IO_SIZE),
+ DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
+ 5),
DEFINE_PROP_END_OF_LIST(),
};
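
Two separate changes meet in this file. First, the block-limits hunk near the top replaces MIN_NON_ZERO() with a conditional MIN(): MIN_NON_ZERO(0, n) evaluates to n, so a min_io_size or opt_io_size of zero ("not reported") used to be silently promoted to max_io_sectors, whereas the new code only clamps values that were actually reported. Second, the hard-coded INQUIRY version byte of 5 (SPC-3) becomes the scsi_version property, defaulting to 5 for scsi-hd/scsi-cd/scsi-disk and to -1 for scsi-block so the version can instead be learned from the real device (see hw/scsi/scsi-generic.c below); the per-request RD/WR/VRPROTECT checks are then skipped for devices reporting SCSI-2 or older. A tiny worked example of the first fix, with illustrative values:

    /* min_io_size == 0 means "backend did not report a minimum".
     * Old code: MIN_NON_ZERO(0, 2048) == 2048  -> a minimum gets invented.
     * New code: clamp only when a value exists. */
    if (min_io_size) {
        min_io_size = MIN(min_io_size, max_io_sectors);  /* e.g. MIN(8, 2048) == 8 */
    }
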
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index 4753f8738f..381f04e339 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -194,17 +194,40 @@ static void scsi_read_complete(void * opaque, int ret)
r->buf[3] |= 0x80;
}
}
- if (s->type == TYPE_DISK &&
- r->req.cmd.buf[0] == INQUIRY &&
- r->req.cmd.buf[2] == 0xb0) {
- uint32_t max_transfer =
- blk_get_max_transfer(s->conf.blk) / s->blocksize;
-
- assert(max_transfer);
- stl_be_p(&r->buf[8], max_transfer);
- /* Also take care of the opt xfer len. */
- stl_be_p(&r->buf[12],
- MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
+ if (r->req.cmd.buf[0] == INQUIRY) {
+ /*
+ * EVPD set to zero returns the standard INQUIRY data.
+ *
+ * Check if scsi_version is unset (-1) to avoid re-defining it
+ * each time an INQUIRY with standard data is received.
+ * scsi_version is initialized with -1 in scsi_generic_reset
+ * and scsi_disk_reset, making sure that we'll set the
+ * scsi_version after a reset. If the version field of the
+ * INQUIRY response somehow changes after a guest reboot,
+ * we'll be able to keep track of it.
+ *
+ * On SCSI-2 and older, first 3 bits of byte 2 is the
+ * ANSI-approved version, while on later versions the
+ * whole byte 2 contains the version. Check if we're dealing
+ * with a newer version and, in that case, assign the
+ * whole byte.
+ */
+ if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
+ s->scsi_version = r->buf[2] & 0x07;
+ if (s->scsi_version > 2) {
+ s->scsi_version = r->buf[2];
+ }
+ }
+ if (s->type == TYPE_DISK && r->req.cmd.buf[2] == 0xb0) {
+ uint32_t max_transfer =
+ blk_get_max_transfer(s->conf.blk) / s->blocksize;
+
+ assert(max_transfer);
+ stl_be_p(&r->buf[8], max_transfer);
+ /* Also take care of the opt xfer len. */
+ stl_be_p(&r->buf[12],
+ MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
+ }
}
scsi_req_data(&r->req, len);
scsi_req_unref(&r->req);
@@ -474,6 +497,7 @@ static void scsi_generic_reset(DeviceState *dev)
{
SCSIDevice *s = SCSI_DEVICE(dev);
+ s->scsi_version = s->default_scsi_version;
scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
@@ -549,6 +573,8 @@ static void scsi_generic_realize(SCSIDevice *s, Error **errp)
DPRINTF("block size %d\n", s->blocksize);
+ /* Only used by scsi-block, but initialize it nevertheless to be clean. */
+ s->default_scsi_version = -1;
scsi_generic_read_device_identification(s);
}
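
The INQUIRY snoop above is what feeds the scsi_version checks introduced in hw/scsi/scsi-disk.c: on the first standard INQUIRY (EVPD bit clear) after a reset, the version byte of the response is recorded on the device. A small self-contained sketch of the decode rule, with a hypothetical helper name:

    #include <stdint.h>

    /* SCSI-2 and older encode the ANSI version in the low 3 bits of byte 2;
     * SPC and later use the whole byte. */
    static int snoop_scsi_version(uint8_t version_byte)
    {
        int v = version_byte & 0x07;
        return (v > 2) ? version_byte : v;
    }
    /* e.g. 0x02 -> 2 (SCSI-2: PROTECT-bit checks are skipped),
     *      0x05 -> 5 (SPC-3: PROTECT-bit checks stay enforced) */
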
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index 4e5855741a..fe34b50769 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -357,11 +357,13 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
if (strcmp(vbasedev->name, vcdev->vdev.name) == 0) {
error_setg(&err, "vfio: subchannel %s has already been attached",
vcdev->vdev.name);
+ g_free(vcdev->vdev.name);
goto out_device_err;
}
}
if (vfio_get_device(group, cdev->mdevid, &vcdev->vdev, &err)) {
+ g_free(vcdev->vdev.name);
goto out_device_err;
}
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 5e84716218..07ffa0ba10 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -548,12 +548,11 @@ static void vfio_listener_region_add(MemoryListener *listener,
hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
- error_report("Region 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx
- " is not aligned to 0x%"HWADDR_PRIx
- " and cannot be mapped for DMA",
- section->offset_within_region,
- int128_getlo(section->size),
- pgmask + 1);
+ trace_vfio_listener_region_add_no_dma_map(
+ memory_region_name(section->mr),
+ section->offset_within_address_space,
+ int128_getlo(section->size),
+ pgmask + 1);
return;
}
}
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 79f63a2ff6..20109cb758 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -90,6 +90,7 @@ vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "i
vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]"
+vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA"
vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64
vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64
vfio_disconnect_container(int fd) "close container->fd=%d"
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 44aea5c0a8..38da8692bb 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -46,6 +46,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -1211,6 +1212,17 @@ static int vhost_user_init(struct vhost_dev *dev, void *opaque)
dev->protocol_features =
protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
+
+ if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
+ /* Don't acknowledge CONFIG feature if device doesn't support it */
+ dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
+ } else if (!(protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
+ error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
+ "but backend does not support it.");
+ return -1;
+ }
+
err = vhost_user_set_protocol_features(dev, dev->protocol_features);
if (err < 0) {
return err;
@@ -1405,6 +1417,11 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
.hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
};
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIG)) {
+ return -1;
+ }
+
if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
return -1;
}
@@ -1448,6 +1465,11 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
.hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
};
+ if (!virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_CONFIG)) {
+ return -1;
+ }
+
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
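
Taken together, these hunks make config-space access an explicitly negotiated capability, and vhost_user_get_config()/vhost_user_set_config() now refuse outright (-1) when the feature was never agreed on. The negotiation added to vhost_user_init() can be summarized as a small decision matrix (a sketch of the logic above):

    /*  device registered     backend offers
     *  config_ops?           F_CONFIG?        result
     *  -------------------   --------------   --------------------------------------
     *  no                    yes or no        F_CONFIG masked out, no config messages
     *  yes                   yes              F_CONFIG kept, get/set_config allowed
     *  yes                   no               vhost_user_init() fails with -1
     */
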
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 250f886acb..f51bf573d5 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -595,10 +595,15 @@ static void vhost_region_add_section(struct vhost_dev *dev,
prev_sec->offset_within_address_space,
prev_sec->offset_within_region);
} else {
- error_report("%s: Overlapping but not coherent sections "
- "at %"PRIx64,
- __func__, mrs_gpa);
- return;
+ /* adjoining regions are fine, but overlapping ones with
+ * different blocks/offsets shouldn't happen
+ */
+ if (mrs_gpa != prev_gpa_end + 1) {
+ error_report("%s: Overlapping but not coherent sections "
+ "at %"PRIx64,
+ __func__, mrs_gpa);
+ return;
+ }
}
}
}
@@ -1223,7 +1228,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
- } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
+ } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
error_setg(&hdev->migration_blocker,
"Migration disabled: failed to allocate shared memory");
}
@@ -1451,7 +1456,6 @@ int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
const VhostDevConfigOps *ops)
{
- assert(hdev->vhost_ops);
hdev->config_ops = ops;
}
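
The relaxed check in vhost_region_add_section() distinguishes sections that merely adjoin the previous one from genuinely incoherent overlaps; only the latter are still reported and dropped. A short worked example with illustrative guest-physical addresses:

    /* prev section covers GPA 0x00000..0x0ffff  =>  prev_gpa_end == 0x0ffff
     *
     *   new section starts at mrs_gpa == 0x10000
     *     -> mrs_gpa == prev_gpa_end + 1: adjoining, accepted silently
     *
     *   new section starts at mrs_gpa == 0x08000 with a different block/offset
     *     -> overlapping but not coherent: error_report() and return, as before
     */

The second hunk in this file swaps qemu_memfd_check() for qemu_memfd_alloc_check() in the migration-blocker test, and the removed assert() in vhost_dev_set_config_notifier() is what permits devices to register their config ops before vhost_dev_init(), matching the hw/block/vhost-user-blk.c change at the top of this diff.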