From bf55b7afce53718ef96f4e6616da62c0ccac37dd Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Fri, 19 May 2017 11:19:40 +0800
Subject: memory: tune last param of iommu_ops.translate()

This patch converts the old "is_write" bool into IOMMUAccessFlags. The
difference is that "is_write" can only express a read or a write, but
sometimes what we really want is "none" (neither read nor write).
Replay is a good example: during replay, we should not check any RW
permission bits, since a replay is not an actual IO at all.

CC: Paolo Bonzini
CC: David Gibson
Reviewed-by: David Gibson
Acked-by: David Gibson
Acked-by: Paolo Bonzini
Signed-off-by: Peter Xu
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Jason Wang
---
 include/exec/memory.h | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/exec/memory.h b/include/exec/memory.h
index 99e0f54d86..97fd0c242b 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -185,8 +185,14 @@ struct MemoryRegionOps {
 typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
 
 struct MemoryRegionIOMMUOps {
-    /* Return a TLB entry that contains a given address. */
-    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
+    /*
+     * Return a TLB entry that contains a given address. Flag should
+     * be the access permission of this translation operation. We can
+     * set flag to IOMMU_NONE to mean that we don't need any
+     * read/write permission checks, like, when for region replay.
+     */
+    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr,
+                               IOMMUAccessFlags flag);
     /* Returns minimum supported page size */
     uint64_t (*get_min_page_size)(MemoryRegion *iommu);
     /* Called when IOMMU Notifier flag changed */
--
cgit v1.2.3-55-g7522
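To make the new contract concrete, here is a minimal self-contained sketch
(not part of the patch) of how a translate() implementation can honor the
flag argument; pt_lookup() and the local type definitions are hypothetical
stand-ins for QEMU's real page-table walk and headers:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t hwaddr;

    /* Models QEMU's IOMMUAccessFlags; IOMMU_NONE asks for no permission check. */
    typedef enum {
        IOMMU_NONE = 0,
        IOMMU_RO   = 1,
        IOMMU_WO   = 2,
        IOMMU_RW   = 3,
    } IOMMUAccessFlags;

    typedef struct {
        hwaddr iova;
        hwaddr translated_addr;
        hwaddr addr_mask;
        IOMMUAccessFlags perm;
    } IOMMUTLBEntry;

    /* Hypothetical page-table walk: fills *entry with mapping and permissions. */
    extern bool pt_lookup(hwaddr addr, IOMMUTLBEntry *entry);

    static IOMMUTLBEntry example_translate(hwaddr addr, IOMMUAccessFlags flag)
    {
        IOMMUTLBEntry entry = { .iova = addr, .perm = IOMMU_NONE };

        if (!pt_lookup(addr, &entry)) {
            return entry;            /* unmapped: perm stays IOMMU_NONE */
        }
        /*
         * Only enforce permissions for a real access; a replay passes
         * IOMMU_NONE and accepts whatever mapping exists.
         */
        if (flag != IOMMU_NONE && !(entry.perm & flag)) {
            entry.perm = IOMMU_NONE; /* access denied */
        }
        return entry;
    }

With a plain bool, the "neither read nor write" case simply could not be
expressed, which is what the next patch exploits for replay.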
From ad523590f62cf5d44e97388de370d27b95b25aff Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Fri, 19 May 2017 11:19:41 +0800
Subject: memory: remove the last param in memory_region_iommu_replay()

We were always passing that parameter in as "false", assuming a read
operation, and we also assumed that the IOMMU translation would always
have read permission. A better permission is IOMMU_NONE, since the
replay is, after all, not a real read operation, but just a page-table
rebuilding process.

CC: David Gibson
CC: Paolo Bonzini
Reviewed-by: David Gibson
Acked-by: Paolo Bonzini
Signed-off-by: Peter Xu
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Jason Wang
---
 hw/vfio/common.c      | 2 +-
 include/exec/memory.h | 5 +----
 memory.c              | 8 +++-----
 3 files changed, 5 insertions(+), 10 deletions(-)

(limited to 'include')

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index a8f12eeb35..b9abe77f5a 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -502,7 +502,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
         QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
 
         memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
-        memory_region_iommu_replay(giommu->iommu, &giommu->n, false);
+        memory_region_iommu_replay(giommu->iommu, &giommu->n);
 
         return;
     }
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 97fd0c242b..bfdc685f24 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -731,11 +731,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
  *
  * @mr: the memory region to observe
  * @n: the notifier to which to replay iommu mappings
- * @is_write: Whether to treat the replay as a translate "write"
- *            through the iommu
  */
-void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
-                                bool is_write);
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n);
 
 /**
  * memory_region_iommu_replay_all: replay existing IOMMU translations
diff --git a/memory.c b/memory.c
index 3f0aae8e17..0ddc4cc28d 100644
--- a/memory.c
+++ b/memory.c
@@ -1620,12 +1620,10 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
     return TARGET_PAGE_SIZE;
 }
 
-void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
-                                bool is_write)
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
 {
     hwaddr addr, granularity;
     IOMMUTLBEntry iotlb;
-    IOMMUAccessFlags flag = is_write ? IOMMU_WO : IOMMU_RO;
 
     /* If the IOMMU has its own replay callback, override */
     if (mr->iommu_ops->replay) {
@@ -1636,7 +1634,7 @@ void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
     granularity = memory_region_iommu_get_min_page_size(mr);
 
     for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
-        iotlb = mr->iommu_ops->translate(mr, addr, flag);
+        iotlb = mr->iommu_ops->translate(mr, addr, IOMMU_NONE);
         if (iotlb.perm != IOMMU_NONE) {
             n->notify(n, &iotlb);
         }
@@ -1654,7 +1652,7 @@ void memory_region_iommu_replay_all(MemoryRegion *mr)
     IOMMUNotifier *notifier;
 
     IOMMU_NOTIFIER_FOREACH(notifier, mr) {
-        memory_region_iommu_replay(mr, notifier, false);
+        memory_region_iommu_replay(mr, notifier);
     }
 }
--
cgit v1.2.3-55-g7522
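The effect of the change is easiest to see in a standalone model of the
replay walk; the function-pointer types and parameters here are
illustrative placeholders (QEMU's real version walks mr->iommu_ops and an
IOMMUNotifier), and the IOMMU types are those from the earlier sketch:

    /* hwaddr, IOMMUTLBEntry, IOMMUAccessFlags, IOMMU_NONE as sketched earlier. */
    typedef IOMMUTLBEntry (*translate_fn)(hwaddr addr, IOMMUAccessFlags flag);
    typedef void (*notify_fn)(IOMMUTLBEntry *entry);

    /*
     * Walk the whole region at the minimum page granularity and replay
     * every live mapping. The translate call passes IOMMU_NONE: a replay
     * is not a real access, so no read/write permission check applies.
     */
    static void example_replay(translate_fn translate, notify_fn notify,
                               hwaddr region_size, hwaddr granularity)
    {
        hwaddr addr;

        for (addr = 0; addr < region_size; addr += granularity) {
            IOMMUTLBEntry iotlb = translate(addr, IOMMU_NONE);
            if (iotlb.perm != IOMMU_NONE) {   /* a mapping exists: replay it */
                notify(&iotlb);
            }
        }
    }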
From dbaabb25f441264d9029dc53e84a156269ecd275 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Fri, 19 May 2017 11:19:47 +0800
Subject: intel_iommu: support passthrough (PT)

Hardware support for VT-d device passthrough. Although current Linux
can live with iommu=pt even without this, this is faster than software
passthrough.

Signed-off-by: Peter Xu
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Liu, Yi L
Reviewed-by: Jason Wang
---
 hw/i386/intel_iommu.c          | 231 ++++++++++++++++++++++++++++++-----------
 hw/i386/intel_iommu_internal.h |   1 +
 hw/i386/trace-events           |   2 +
 hw/i386/x86-iommu.c            |   1 +
 include/hw/i386/x86-iommu.h    |   1 +
 5 files changed, 177 insertions(+), 59 deletions(-)

(limited to 'include')

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index aac2cc7483..15610b9de8 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -613,6 +613,11 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
             return false;
         }
         break;
+    case VTD_CONTEXT_TT_PASS_THROUGH:
+        if (!x86_iommu->pt_supported) {
+            return false;
+        }
+        break;
     default:
         /* Unknwon type */
         return false;
@@ -660,6 +665,29 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
     }
 }
 
+/* Find the VTD address space associated with a given bus number */
+static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
+{
+    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
+    if (!vtd_bus) {
+        /*
+         * Iterate over the registered buses to find the one which
+         * currently hold this bus number, and update the bus_num
+         * lookup table:
+         */
+        GHashTableIter iter;
+
+        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+            if (pci_bus_num(vtd_bus->bus) == bus_num) {
+                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
+                return vtd_bus;
+            }
+        }
+    }
+    return vtd_bus;
+}
+
 /* Given the @iova, get relevant @slptep. @slpte_level will be the last level
  * of the translation, can be used for deciding the size of large page.
  */
@@ -906,6 +934,91 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
     return 0;
 }
 
+/*
+ * Fetch translation type for specific device. Returns <0 if error
+ * happens, otherwise return the shifted type to check against
+ * VTD_CONTEXT_TT_*.
+ */
+static int vtd_dev_get_trans_type(VTDAddressSpace *as)
+{
+    IntelIOMMUState *s;
+    VTDContextEntry ce;
+    int ret;
+
+    s = as->iommu_state;
+
+    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
+                                   as->devfn, &ce);
+    if (ret) {
+        return ret;
+    }
+
+    return vtd_ce_get_type(&ce);
+}
+
+static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
+{
+    int ret;
+
+    assert(as);
+
+    ret = vtd_dev_get_trans_type(as);
+    if (ret < 0) {
+        /*
+         * Possibly failed to parse the context entry for some reason
+         * (e.g., during init, or any guest configuration errors on
+         * context entries). We should assume PT not enabled for
+         * safety.
+         */
+        return false;
+    }
+
+    return ret == VTD_CONTEXT_TT_PASS_THROUGH;
+}
+
+/* Return whether the device is using IOMMU translation. */
+static bool vtd_switch_address_space(VTDAddressSpace *as)
+{
+    bool use_iommu;
+
+    assert(as);
+
+    use_iommu = as->iommu_state->dmar_enabled & !vtd_dev_pt_enabled(as);
+
+    trace_vtd_switch_address_space(pci_bus_num(as->bus),
+                                   VTD_PCI_SLOT(as->devfn),
+                                   VTD_PCI_FUNC(as->devfn),
+                                   use_iommu);
+
+    /* Turn off first then on the other */
+    if (use_iommu) {
+        memory_region_set_enabled(&as->sys_alias, false);
+        memory_region_set_enabled(&as->iommu, true);
+    } else {
+        memory_region_set_enabled(&as->iommu, false);
+        memory_region_set_enabled(&as->sys_alias, true);
+    }
+
+    return use_iommu;
+}
+
+static void vtd_switch_address_space_all(IntelIOMMUState *s)
+{
+    GHashTableIter iter;
+    VTDBus *vtd_bus;
+    int i;
+
+    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
+            if (!vtd_bus->dev_as[i]) {
+                continue;
+            }
+            vtd_switch_address_space(vtd_bus->dev_as[i]);
+        }
+    }
+}
+
 static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
 {
     return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
@@ -943,6 +1056,31 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr)
     return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
 }
 
+static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
+{
+    VTDBus *vtd_bus;
+    VTDAddressSpace *vtd_as;
+    bool success = false;
+
+    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
+    if (!vtd_bus) {
+        goto out;
+    }
+
+    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
+    if (!vtd_as) {
+        goto out;
+    }
+
+    if (vtd_switch_address_space(vtd_as) == false) {
+        /* We switched off IOMMU region successfully. */
+        success = true;
+    }
+
+out:
+    trace_vtd_pt_enable_fast_path(source_id, success);
+}
+
 /* Map dev to context-entry then do a paging-structures walk to do a iommu
  * translation.
  *
@@ -1014,6 +1152,30 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
         cc_entry->context_cache_gen = s->context_cache_gen;
     }
 
+    /*
+     * We don't need to translate for pass-through context entries.
+     * Also, let's ignore IOTLB caching as well for PT devices.
+     */
+    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+        entry->translated_addr = entry->iova;
+        entry->addr_mask = VTD_PAGE_SIZE - 1;
+        entry->perm = IOMMU_RW;
+        trace_vtd_translate_pt(source_id, entry->iova);
+
+        /*
+         * When this happens, it means firstly caching-mode is not
+         * enabled, and this is the first passthrough translation for
+         * the device. Let's enable the fast path for passthrough.
+         *
+         * When passthrough is disabled again for the device, we can
+         * capture it via the context entry invalidation, then the
+         * IOMMU region can be swapped back.
+         */
+        vtd_pt_enable_fast_path(s, source_id);
+
+        return;
+    }
+
     ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                                &reads, &writes);
     if (ret_fr) {
@@ -1083,6 +1245,7 @@ static void vtd_context_global_invalidate(IntelIOMMUState *s)
     if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
         vtd_reset_context_cache(s);
    }
+    vtd_switch_address_space_all(s);
     /*
      * From VT-d spec 6.5.2.1, a global context entry invalidation
      * should be followed by a IOTLB global invalidation, so we should
      * be safe even without this. Hold the assumption for now.
      */
     vtd_iommu_replay_all(s);
 }
 
-
-/* Find the VTD address space currently associated with a given bus number,
- */
-static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
-{
-    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
-    if (!vtd_bus) {
-        /* Iterate over the registered buses to find the one
-         * which currently hold this bus number, and update the bus_num lookup table:
-         */
-        GHashTableIter iter;
-
-        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
-        while (g_hash_table_iter_next (&iter, NULL, (void**)&vtd_bus)) {
-            if (pci_bus_num(vtd_bus->bus) == bus_num) {
-                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
-                return vtd_bus;
-            }
-        }
-    }
-    return vtd_bus;
-}
-
 /* Do a context-cache device-selective invalidation.
  * @func_mask: FM field after shifting
  */
@@ -1157,6 +1297,11 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
                 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                              VTD_PCI_FUNC(devfn_it));
                 vtd_as->context_cache_entry.context_cache_gen = 0;
+                /*
+                 * Do switch address space when needed, in case if the
+                 * device passthrough bit is switched.
+                 */
+                vtd_switch_address_space(vtd_as);
                 /*
                  * So a device is moving out of (or moving into) a
                  * domain, a replay() suites here to notify all the
@@ -1389,42 +1534,6 @@ static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
     vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
 }
 
-static void vtd_switch_address_space(VTDAddressSpace *as)
-{
-    assert(as);
-
-    trace_vtd_switch_address_space(pci_bus_num(as->bus),
-                                   VTD_PCI_SLOT(as->devfn),
-                                   VTD_PCI_FUNC(as->devfn),
-                                   as->iommu_state->dmar_enabled);
-
-    /* Turn off first then on the other */
-    if (as->iommu_state->dmar_enabled) {
-        memory_region_set_enabled(&as->sys_alias, false);
-        memory_region_set_enabled(&as->iommu, true);
-    } else {
-        memory_region_set_enabled(&as->iommu, false);
-        memory_region_set_enabled(&as->sys_alias, true);
-    }
-}
-
-static void vtd_switch_address_space_all(IntelIOMMUState *s)
-{
-    GHashTableIter iter;
-    VTDBus *vtd_bus;
-    int i;
-
-    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
-    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
-        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
-            if (!vtd_bus->dev_as[i]) {
-                continue;
-            }
-            vtd_switch_address_space(vtd_bus->dev_as[i]);
-        }
-    }
-}
-
 /* Handle Translation Enable/Disable */
 static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
 {
@@ -2872,6 +2981,10 @@ static void vtd_init(IntelIOMMUState *s)
         s->ecap |= VTD_ECAP_DT;
     }
 
+    if (x86_iommu->pt_supported) {
+        s->ecap |= VTD_ECAP_PT;
+    }
+
     if (s->caching_mode) {
         s->cap |= VTD_CAP_CM;
     }
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 29d67075f4..0e73a65bf2 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -187,6 +187,7 @@
 /* Interrupt Remapping support */
 #define VTD_ECAP_IR                 (1ULL << 3)
 #define VTD_ECAP_EIM                (1ULL << 4)
+#define VTD_ECAP_PT                 (1ULL << 6)
 #define VTD_ECAP_MHMV               (15ULL << 20)
 
 /* CAP_REG */
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 04a6980800..72556dad48 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -38,6 +38,8 @@ vtd_page_walk_skip_perm(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"P
 vtd_page_walk_skip_reserve(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to rsrv set"
 vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
 vtd_as_unmap_whole(uint8_t bus, uint8_t slot, uint8_t fn, uint64_t iova, uint64_t size) "Device %02x:%02x.%x start 0x%"PRIx64" size 0x%"PRIx64
+vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIu16", iova 0x%"PRIx64
+vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIu16" %d"
 
 # hw/i386/amd_iommu.c
 amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index 02b88258a4..293caf83ef 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -91,6 +91,7 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp)
 static Property x86_iommu_properties[] = {
     DEFINE_PROP_BOOL("intremap", X86IOMMUState, intr_supported, false),
     DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false),
+    DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h
index 361c07cdc6..ef89c0c646 100644
--- a/include/hw/i386/x86-iommu.h
+++ b/include/hw/i386/x86-iommu.h
@@ -74,6 +74,7 @@ struct X86IOMMUState {
     SysBusDevice busdev;
     bool intr_supported;        /* Whether vIOMMU supports IR */
     bool dt_supported;          /* Whether vIOMMU supports DT */
+    bool pt_supported;          /* Whether vIOMMU supports pass-through */
    IommuType type;             /* IOMMU type - AMD/Intel */
     QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
 };
--
cgit v1.2.3-55-g7522
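The heart of this patch is the decision of which memory region backs a
device's DMA: the IOMMU translation region, or a direct alias of system
memory when DMAR is off or the device's context entry requests
pass-through. Below is a condensed model of vtd_switch_address_space(),
with QEMU's device state flattened into two booleans and hypothetical
hooks standing in for memory_region_set_enabled():

    #include <stdbool.h>

    /* Hypothetical hooks standing in for memory_region_set_enabled(). */
    extern void enable_iommu_region(bool on);
    extern void enable_sysmem_alias(bool on);

    /*
     * A device goes through IOMMU translation only if DMAR is globally
     * enabled and its context entry is not VTD_CONTEXT_TT_PASS_THROUGH.
     * Returns whether translation ends up enabled.
     */
    static bool example_switch_address_space(bool dmar_enabled, bool pt_enabled)
    {
        bool use_iommu = dmar_enabled && !pt_enabled;

        /* Turn the old region off before turning the other one on. */
        if (use_iommu) {
            enable_sysmem_alias(false);
            enable_iommu_region(true);
        } else {
            enable_iommu_region(false);
            enable_sysmem_alias(true);
        }
        return use_iommu;
    }

(The patch itself computes use_iommu with a bitwise '&' on two bool
operands, which yields the same value as the logical '&&' used here.)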
From c10595fb348d3fe6991fe9750834bd2ab16d3190 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Fri, 19 May 2017 11:19:48 +0800
Subject: intel_iommu: turn off pt before 2.9

This is for compatibility: the new "pt" property defaults to off for
machine types 2.9 and older.

Signed-off-by: Peter Xu
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Jason Wang
---
 include/hw/compat.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include')

diff --git a/include/hw/compat.h b/include/hw/compat.h
index 55b176507a..4c53d600c1 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -6,6 +6,10 @@
         .driver = "pci-bridge",\
         .property = "shpc",\
         .value = "off",\
+    },{\
+        .driver = "intel-iommu",\
+        .property = "pt",\
+        .value = "off",\
     },
 
 #define HW_COMPAT_2_8 \
--
cgit v1.2.3-55-g7522
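For readers unfamiliar with the compat machinery: each HW_COMPAT_2_9 entry
is a GlobalProperty initializer that 2.9-and-older machine types pull into
their compat_props, so on those machines the "pt" property silently
defaults back to off. Schematically (illustrative, not the literal
preprocessor output):

    /* What the macro entries expand into, as consumed by a machine type: */
    static GlobalProperty hw_compat_2_9[] = {
        {
            .driver = "pci-bridge",
            .property = "shpc",
            .value = "off",
        },{
            .driver = "intel-iommu",
            .property = "pt",
            .value = "off",
        },
    };

The same effect should be achievable by hand on any machine type with
-global intel-iommu.pt=off on the command line.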
From 75ebec11afe49539f71cc1c494e3010f91c86adb Mon Sep 17 00:00:00 2001
From: Maxime Coquelin
Date: Tue, 23 May 2017 14:31:19 +0200
Subject: virtio_net: Bypass backends for MTU feature negotiation

This patch adds a new internal "x-mtu-bypass-backend" property to bypass
backends for MTU feature negotiation. When this property is set, the MTU
feature is negotiated as soon as it is supported by the guest and an MTU
value is set via the host_mtu parameter. In case the backend advertises
the feature (e.g. DPDK's vhost-user backend), the feature negotiation is
propagated down to the backend.

When this property is not set, the backend has to support the MTU
feature for the negotiation to succeed.

For compatibility purposes, this property is disabled for machine types
v2.9 and older.

Cc: Aaron Conole
Suggested-by: Michael S. Tsirkin
Signed-off-by: Maxime Coquelin
Reviewed-by: Vlad Yasevich
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/net/virtio-net.c            | 17 ++++++++++++++++-
 include/hw/compat.h            |  4 ++++
 include/hw/virtio/virtio-net.h |  1 +
 include/hw/virtio/virtio.h     |  1 +
 4 files changed, 22 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 98bd683f31..9a3d769aa2 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -589,7 +589,15 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
     if (!get_vhost_net(nc->peer)) {
         return features;
     }
-    return vhost_net_get_features(get_vhost_net(nc->peer), features);
+    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
+    vdev->backend_features = features;
+
+    if (n->mtu_bypass_backend &&
+        (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
+        features |= (1ULL << VIRTIO_NET_F_MTU);
+    }
+
+    return features;
 }
 
 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
@@ -640,6 +648,11 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
     VirtIONet *n = VIRTIO_NET(vdev);
     int i;
 
+    if (n->mtu_bypass_backend &&
+        !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
+        features &= ~(1ULL << VIRTIO_NET_F_MTU);
+    }
+
     virtio_net_set_multiqueue(n,
                               virtio_has_feature(features, VIRTIO_NET_F_MQ));
 
@@ -2093,6 +2106,8 @@ static Property virtio_net_properties[] = {
     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
+    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
+                     true),
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/include/hw/compat.h b/include/hw/compat.h
index 4c53d600c1..400c64b318 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -10,6 +10,10 @@
         .driver = "intel-iommu",\
         .property = "pt",\
         .value = "off",\
+    },{\
+        .driver = "virtio-net-device",\
+        .property = "x-mtu-bypass-backend",\
+        .value = "off",\
     },
 
 #define HW_COMPAT_2_8 \
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 1eec9a2da3..602b4868d4 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -97,6 +97,7 @@ typedef struct VirtIONet {
     QEMUTimer *announce_timer;
     int announce_counter;
     bool needs_vnet_hdr_swap;
+    bool mtu_bypass_backend;
 } VirtIONet;
 
 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 7b6edbafd7..80c45c321e 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -79,6 +79,7 @@ struct VirtIODevice
     uint16_t queue_sel;
     uint64_t guest_features;
     uint64_t host_features;
+    uint64_t backend_features;
     size_t config_len;
     void *config;
     uint16_t config_vector;
--
cgit v1.2.3-55-g7522
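The negotiation logic added above is subtle enough to be worth restating
outside the diff. Below is a self-contained model of the two halves, with
the QEMU device state passed in as explicit parameters; the example_*
names and reduced signatures are this sketch's own, not QEMU API:

    #include <stdbool.h>
    #include <stdint.h>

    #define VIRTIO_NET_F_MTU 3   /* virtio-net MTU feature bit number */

    static bool has_feature(uint64_t features, unsigned bit)
    {
        return features & (1ULL << bit);
    }

    /*
     * Offer side (get_features): record what the backend really accepted
     * in *backend_features, then re-advertise MTU to the guest whenever
     * the device itself offers it, even if the backend dropped the bit.
     */
    static uint64_t example_get_features(uint64_t backend_feats,
                                         uint64_t host_features,
                                         uint64_t *backend_features,
                                         bool mtu_bypass_backend)
    {
        uint64_t features = backend_feats;

        *backend_features = features;
        if (mtu_bypass_backend && has_feature(host_features, VIRTIO_NET_F_MTU)) {
            features |= 1ULL << VIRTIO_NET_F_MTU;
        }
        return features;
    }

    /*
     * Acceptance side (set_features): before acting on the guest's
     * selection, mask MTU back out if the backend never supported it,
     * so the backend-facing path only ever sees bits it negotiated.
     */
    static uint64_t example_set_features(uint64_t guest_features,
                                         uint64_t backend_features,
                                         bool mtu_bypass_backend)
    {
        if (mtu_bypass_backend &&
            !has_feature(backend_features, VIRTIO_NET_F_MTU)) {
            guest_features &= ~(1ULL << VIRTIO_NET_F_MTU);
        }
        return guest_features;
    }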