From 0b99f22461e59ec7a31c75ebc4c057a45dd9e9a5 Mon Sep 17 00:00:00 2001 From: Marc-André Lureau Date: Fri, 8 Mar 2019 15:04:45 +0100 Subject: vhost-user: simplify vhost_user_init/vhost_user_cleanup Take a VhostUserState* that can be pre-allocated, and initialize it with the associated chardev. Signed-off-by: Marc-André Lureau Reviewed-by: Tiwei Bie Message-Id: <20190308140454.32437-4-marcandre.lureau@redhat.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- include/hw/virtio/vhost-user-blk.h | 2 +- include/hw/virtio/vhost-user-scsi.h | 2 +- include/hw/virtio/vhost-user.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h index d52944aeeb..a8a106eecb 100644 --- a/include/hw/virtio/vhost-user-blk.h +++ b/include/hw/virtio/vhost-user-blk.h @@ -36,7 +36,7 @@ typedef struct VHostUserBlk { uint32_t queue_size; uint32_t config_wce; struct vhost_dev dev; - VhostUserState *vhost_user; + VhostUserState vhost_user; } VHostUserBlk; #endif diff --git a/include/hw/virtio/vhost-user-scsi.h b/include/hw/virtio/vhost-user-scsi.h index e429cacd8e..738f9288bd 100644 --- a/include/hw/virtio/vhost-user-scsi.h +++ b/include/hw/virtio/vhost-user-scsi.h @@ -30,7 +30,7 @@ typedef struct VHostUserSCSI { VHostSCSICommon parent_obj; - VhostUserState *vhost_user; + VhostUserState vhost_user; } VHostUserSCSI; #endif /* VHOST_USER_SCSI_H */ diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index fd660393a0..811e325f42 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -22,7 +22,7 @@ typedef struct VhostUserState { VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX]; } VhostUserState; -VhostUserState *vhost_user_init(void); +bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp); void vhost_user_cleanup(VhostUserState *user); #endif -- cgit v1.2.3-55-g7522 From fb43cf739e1a74178212bfcd4313b598109d2310 Mon Sep 17 00:00:00 2001 From: Liu, Yi L Date: Tue, 5 Mar 2019 10:34:53 +0800 Subject: intel_iommu: scalable mode emulation Intel(R) VT-d 3.0 spec introduces scalable mode address translation to replace extended context mode. This patch extends the current emulator to support Scalable Mode, which includes changes to the root table and context table and a new pasid table format. Now intel_iommu emulates both legacy mode and scalable mode (with legacy-equivalent capability set). The key points are below: 1. Extend root table operations to support both legacy mode and scalable mode. 2. Extend context table operations to support both legacy mode and scalable mode. 3. Add pasid table operations to support scalable mode. Signed-off-by: Liu, Yi L [Yi Sun is co-developer and contributed much to refine the whole commit.] Signed-off-by: Yi Sun Message-Id: <1551753295-30167-2-git-send-email-yi.y.sun@linux.intel.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S.
Tsirkin Reviewed-by: Peter Xu --- hw/i386/intel_iommu.c | 494 +++++++++++++++++++++++++++++++++-------- hw/i386/intel_iommu_internal.h | 41 +++- hw/i386/trace-events | 2 +- include/hw/i386/intel_iommu.h | 24 +- 4 files changed, 464 insertions(+), 97 deletions(-) (limited to 'include') diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index ee22e754f0..ea9ea93e42 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -37,6 +37,27 @@ #include "kvm_i386.h" #include "trace.h" +/* context entry operations */ +#define VTD_CE_GET_RID2PASID(ce) \ + ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) +#define VTD_CE_GET_PASID_DIR_TABLE(ce) \ + ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) + +/* pe operations */ +#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) +#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) +#define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ + if (ret_fr) { \ + ret_fr = -ret_fr; \ + if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ + trace_vtd_fault_disabled(); \ + } else { \ + vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ + } \ + goto error; \ + } \ +} + static void vtd_address_space_refresh_all(IntelIOMMUState *s); static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); @@ -512,9 +533,15 @@ static void vtd_generate_completion_event(IntelIOMMUState *s) } } -static inline bool vtd_root_entry_present(VTDRootEntry *root) +static inline bool vtd_root_entry_present(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t devfn) { - return root->val & VTD_ROOT_ENTRY_P; + if (s->root_scalable && devfn > UINT8_MAX / 2) { + return re->hi & VTD_ROOT_ENTRY_P; + } + + return re->lo & VTD_ROOT_ENTRY_P; } static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, @@ -524,10 +551,11 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, addr = s->root + index * sizeof(*re); if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { - re->val = 0; + re->lo = 0; return -VTD_FR_ROOT_TABLE_INV; } - re->val = le64_to_cpu(re->val); + re->lo = le64_to_cpu(re->lo); + re->hi = le64_to_cpu(re->hi); return 0; } @@ -536,18 +564,35 @@ static inline bool vtd_ce_present(VTDContextEntry *context) return context->lo & VTD_CONTEXT_ENTRY_P; } -static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index, +static int vtd_get_context_entry_from_root(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t index, VTDContextEntry *ce) { - dma_addr_t addr; + dma_addr_t addr, ce_size; /* we have checked that root entry is present */ - addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce); - if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) { + ce_size = s->root_scalable ? 
VTD_CTX_ENTRY_SCALABLE_SIZE : + VTD_CTX_ENTRY_LEGACY_SIZE; + + if (s->root_scalable && index > UINT8_MAX / 2) { + index = index & (~VTD_DEVFN_CHECK_MASK); + addr = re->hi & VTD_ROOT_ENTRY_CTP; + } else { + addr = re->lo & VTD_ROOT_ENTRY_CTP; + } + + addr = addr + index * ce_size; + if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { return -VTD_FR_CONTEXT_TABLE_INV; } + ce->lo = le64_to_cpu(ce->lo); ce->hi = le64_to_cpu(ce->hi); + if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) { + ce->val[2] = le64_to_cpu(ce->val[2]); + ce->val[3] = le64_to_cpu(ce->val[3]); + } return 0; } @@ -600,6 +645,144 @@ static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); } +/* Return true if check passed, otherwise false */ +static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, + VTDPASIDEntry *pe) +{ + switch (VTD_PE_GET_TYPE(pe)) { + case VTD_SM_PASID_ENTRY_FLT: + case VTD_SM_PASID_ENTRY_SLT: + case VTD_SM_PASID_ENTRY_NESTED: + break; + case VTD_SM_PASID_ENTRY_PT: + if (!x86_iommu->pt_supported) { + return false; + } + break; + default: + /* Unknown type */ + return false; + } + return true; +} + +static int vtd_get_pasid_dire(dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDDirEntry *pdire) +{ + uint32_t index; + dma_addr_t addr, entry_size; + + index = VTD_PASID_DIR_INDEX(pasid); + entry_size = VTD_PASID_DIR_ENTRY_SIZE; + addr = pasid_dir_base + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static int vtd_get_pasid_entry(IntelIOMMUState *s, + uint32_t pasid, + VTDPASIDDirEntry *pdire, + VTDPASIDEntry *pe) +{ + uint32_t index; + dma_addr_t addr, entry_size; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + index = VTD_PASID_TABLE_INDEX(pasid); + entry_size = VTD_PASID_ENTRY_SIZE; + addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; + addr = addr + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + /* Do translation type check */ + if (!vtd_pe_type_check(x86_iommu, pe)) { + return -VTD_FR_PASID_TABLE_INV; + } + + if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static int vtd_get_pasid_entry_from_pasid(IntelIOMMUState *s, + dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDEntry *pe) +{ + int ret; + VTDPASIDDirEntry pdire; + + ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); + if (ret) { + return ret; + } + + ret = vtd_get_pasid_entry(s, pasid, &pdire, pe); + if (ret) { + return ret; + } + + return ret; +} + +static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, + VTDContextEntry *ce, + VTDPASIDEntry *pe) +{ + uint32_t pasid; + dma_addr_t pasid_dir_base; + int ret = 0; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + ret = vtd_get_pasid_entry_from_pasid(s, pasid_dir_base, pasid, pe); + + return ret; +} + +static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, + VTDContextEntry *ce, + bool *pe_fpd_set) +{ + int ret; + uint32_t pasid; + dma_addr_t pasid_dir_base; + VTDPASIDDirEntry pdire; + VTDPASIDEntry pe; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + + ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); + if (ret) { + return ret; + } + + if (pdire.val & VTD_PASID_DIR_FPD) { + *pe_fpd_set = true; + return 0; + } + + ret = vtd_get_pasid_entry(s, pasid, &pdire,
&pe); + if (ret) { + return ret; + } + + if (pe.val[0] & VTD_PASID_ENTRY_FPD) { + *pe_fpd_set = true; + } + + return 0; +} + /* Get the page-table level that hardware should use for the second-level * page-table walk from the Address Width field of context-entry. */ @@ -608,17 +791,43 @@ static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); } +static uint32_t vtd_get_iova_level(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_PE_GET_LEVEL(&pe); + } + + return vtd_ce_get_level(ce); +} + static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) { return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; } +static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; + } + + return vtd_ce_get_agaw(ce); +} + static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce) { return ce->lo & VTD_CONTEXT_ENTRY_TT; } -/* Return true if check passed, otherwise false */ +/* Only for Legacy Mode. Return true if check passed, otherwise false */ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, VTDContextEntry *ce) { @@ -639,7 +848,7 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, } break; default: - /* Unknwon type */ + /* Unknown type */ error_report_once("%s: unknown ce type: %"PRIu32, __func__, vtd_ce_get_type(ce)); return false; @@ -647,21 +856,36 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, return true; } -static inline uint64_t vtd_iova_limit(VTDContextEntry *ce, uint8_t aw) +static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, + VTDContextEntry *ce, uint8_t aw) { - uint32_t ce_agaw = vtd_ce_get_agaw(ce); + uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); return 1ULL << MIN(ce_agaw, aw); } /* Return true if IOVA passes range check, otherwise false. */ -static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce, +static inline bool vtd_iova_range_check(IntelIOMMUState *s, + uint64_t iova, VTDContextEntry *ce, uint8_t aw) { /* * Check if @iova is above 2^X-1, where X is the minimum of MGAW * in CAP_REG and AW in context-entry. */ - return !(iova & ~(vtd_iova_limit(ce, aw) - 1)); + return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); +} + +static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; + } + + return vtd_ce_get_slpt_base(ce); } /* @@ -707,17 +931,18 @@ static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) /* Given the @iova, get relevant @slptep. @slpte_level will be the last level * of the translation, can be used for deciding the size of large page. 
*/ -static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write, +static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t iova, bool is_write, uint64_t *slptep, uint32_t *slpte_level, bool *reads, bool *writes, uint8_t aw_bits) { - dma_addr_t addr = vtd_ce_get_slpt_base(ce); - uint32_t level = vtd_ce_get_level(ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = vtd_get_iova_level(s, ce); uint32_t offset; uint64_t slpte; uint64_t access_right_check; - if (!vtd_iova_range_check(iova, ce, aw_bits)) { + if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")", __func__, iova); return -VTD_FR_ADDR_BEYOND_MGAW; @@ -733,7 +958,7 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write, if (slpte == (uint64_t)-1) { error_report_once("%s: detected read error on DMAR slpte " "(iova=0x%" PRIx64 ")", __func__, iova); - if (level == vtd_ce_get_level(ce)) { + if (level == vtd_get_iova_level(s, ce)) { /* Invalid programming of context-entry */ return -VTD_FR_CONTEXT_ENTRY_INV; } else { @@ -962,29 +1187,96 @@ next: /** * vtd_page_walk - walk specific IOVA range, and call the hook * + * @s: intel iommu state * @ce: context entry to walk upon * @start: IOVA address to start the walk * @end: IOVA range end address (start <= addr < end) * @info: page walking information struct */ -static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end, +static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t start, uint64_t end, vtd_page_walk_info *info) { - dma_addr_t addr = vtd_ce_get_slpt_base(ce); - uint32_t level = vtd_ce_get_level(ce); + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = vtd_get_iova_level(s, ce); - if (!vtd_iova_range_check(start, ce, info->aw)) { + if (!vtd_iova_range_check(s, start, ce, info->aw)) { return -VTD_FR_ADDR_BEYOND_MGAW; } - if (!vtd_iova_range_check(end, ce, info->aw)) { + if (!vtd_iova_range_check(s, end, ce, info->aw)) { /* Fix end so that it reaches the maximum */ - end = vtd_iova_limit(ce, info->aw); + end = vtd_iova_limit(s, ce, info->aw); } return vtd_page_walk_level(addr, start, end, level, true, true, info); } +static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDRootEntry *re) +{ + /* Legacy Mode reserved bits check */ + if (!s->root_scalable && + (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + /* Scalable Mode reserved bits check */ + if (s->root_scalable && + ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || + (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + return 0; + +rsvd_err: + error_report_once("%s: invalid root entry: hi=0x%"PRIx64 + ", lo=0x%"PRIx64, + __func__, re->hi, re->lo); + return -VTD_FR_ROOT_ENTRY_RSVD; +} + +static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + if (!s->root_scalable && + (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || + ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { + error_report_once("%s: invalid context entry: hi=%"PRIx64 + ", lo=%"PRIx64" (reserved nonzero)", + __func__, ce->hi, ce->lo); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + if (s->root_scalable && + (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || + ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || + ce->val[2] || + ce->val[3])) { + error_report_once("%s: invalid context entry: val[3]=%"PRIx64 + ", val[2]=%"PRIx64 + ", val[1]=%"PRIx64 + ", val[0]=%"PRIx64" (reserved 
nonzero)", + __func__, ce->val[3], ce->val[2], + ce->val[1], ce->val[0]); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + return 0; +} + +static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + /* + * Make sure in Scalable Mode, a present context entry + * has valid rid2pasid setting, which includes valid + * rid2pasid field and corresponding pasid entry setting + */ + return vtd_ce_get_rid2pasid_entry(s, ce, &pe); +} + /* Map a device to its corresponding domain (context-entry) */ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, uint8_t devfn, VTDContextEntry *ce) @@ -998,20 +1290,18 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, return ret_fr; } - if (!vtd_root_entry_present(&re)) { + if (!vtd_root_entry_present(s, &re, devfn)) { /* Not error - it's okay we don't have root entry. */ trace_vtd_re_not_present(bus_num); return -VTD_FR_ROOT_ENTRY_P; } - if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(s->aw_bits))) { - error_report_once("%s: invalid root entry: rsvd=0x%"PRIx64 - ", val=0x%"PRIx64" (reserved nonzero)", - __func__, re.rsvd, re.val); - return -VTD_FR_ROOT_ENTRY_RSVD; + ret_fr = vtd_root_entry_rsvd_bits_check(s, &re); + if (ret_fr) { + return ret_fr; } - ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce); + ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce); if (ret_fr) { return ret_fr; } @@ -1022,26 +1312,38 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, return -VTD_FR_CONTEXT_ENTRY_P; } - if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) || - (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { - error_report_once("%s: invalid context entry: hi=%"PRIx64 - ", lo=%"PRIx64" (reserved nonzero)", - __func__, ce->hi, ce->lo); - return -VTD_FR_CONTEXT_ENTRY_RSVD; + ret_fr = vtd_context_entry_rsvd_bits_check(s, ce); + if (ret_fr) { + return ret_fr; } /* Check if the programming of context-entry is valid */ - if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) { + if (!s->root_scalable && + !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { error_report_once("%s: invalid context entry: hi=%"PRIx64 ", lo=%"PRIx64" (level %d not supported)", - __func__, ce->hi, ce->lo, vtd_ce_get_level(ce)); + __func__, ce->hi, ce->lo, + vtd_ce_get_level(ce)); return -VTD_FR_CONTEXT_ENTRY_INV; } - /* Do translation type check */ - if (!vtd_ce_type_check(x86_iommu, ce)) { - /* Errors dumped in vtd_ce_type_check() */ - return -VTD_FR_CONTEXT_ENTRY_INV; + if (!s->root_scalable) { + /* Do translation type check */ + if (!vtd_ce_type_check(x86_iommu, ce)) { + /* Errors dumped in vtd_ce_type_check() */ + return -VTD_FR_CONTEXT_ENTRY_INV; + } + } else { + /* + * Check if the programming of context-entry.rid2pasid + * and corresponding pasid setting is valid, and thus + * avoids to check pasid entry fetching result in future + * helper function calling. 
+ */ + ret_fr = vtd_ce_rid2pasid_check(s, ce); + if (ret_fr) { + return ret_fr; + } } return 0; @@ -1054,6 +1356,19 @@ static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry, return 0; } +static uint16_t vtd_get_domain_id(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_SM_PASID_ENTRY_DID(pe.val[1]); + } + + return VTD_CONTEXT_ENTRY_DID(ce->hi); +} + static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, VTDContextEntry *ce, hwaddr addr, hwaddr size) @@ -1065,10 +1380,10 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, .notify_unmap = true, .aw = s->aw_bits, .as = vtd_as, - .domain_id = VTD_CONTEXT_ENTRY_DID(ce->hi), + .domain_id = vtd_get_domain_id(s, ce), }; - return vtd_page_walk(ce, addr, addr + size, &info); + return vtd_page_walk(s, ce, addr, addr + size, &info); } static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) @@ -1103,35 +1418,24 @@ } /* - * Fetch translation type for specific device. Returns <0 if error - * happens, otherwise return the shifted type to check against - * VTD_CONTEXT_TT_*. + * Check if a specific device is configured to bypass address + * translation for DMA requests. In Scalable Mode, whether + * 1st-level or 2nd-level translation is bypassed depends + * on the PGTT setting. */ -static int vtd_dev_get_trans_type(VTDAddressSpace *as) +static bool vtd_dev_pt_enabled(VTDAddressSpace *as) { IntelIOMMUState *s; VTDContextEntry ce; + VTDPASIDEntry pe; int ret; - s = as->iommu_state; + assert(as); + s = as->iommu_state; ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), as->devfn, &ce); if (ret) { - return ret; - } - - return vtd_ce_get_type(&ce); -} - -static bool vtd_dev_pt_enabled(VTDAddressSpace *as) -{ - int ret; - - assert(as); - - ret = vtd_dev_get_trans_type(as); - if (ret < 0) { /* * Possibly failed to parse the context entry for some reason * (e.g., during init, or any guest configuration errors on @@ -1141,7 +1445,17 @@ static bool vtd_dev_pt_enabled(VTDAddressSpace *as) return false; } - return ret == VTD_CONTEXT_TT_PASS_THROUGH; + if (s->root_scalable) { + ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); + if (ret) { + error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32, + __func__, ret); + return false; + } + return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); + } + + return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); } /* Return whether the device is using IOMMU translation.
*/ @@ -1221,6 +1535,7 @@ static const bool vtd_qualified_faults[] = { [VTD_FR_ROOT_ENTRY_RSVD] = false, [VTD_FR_PAGING_ENTRY_RSVD] = true, [VTD_FR_CONTEXT_ENTRY_TT] = true, + [VTD_FR_PASID_TABLE_INV] = false, [VTD_FR_RESERVED_ERR] = false, [VTD_FR_MAX] = false, }; @@ -1322,18 +1637,17 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, cc_entry->context_cache_gen); ce = cc_entry->context_entry; is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; + if (!is_fpd_set && s->root_scalable) { + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); + } } else { ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; - if (ret_fr) { - ret_fr = -ret_fr; - if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { - trace_vtd_fault_disabled(); - } else { - vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); - } - goto error; + if (!ret_fr && !is_fpd_set && s->root_scalable) { + ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); } + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); /* Update context-cache */ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, cc_entry->context_cache_gen, @@ -1367,21 +1681,13 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, return true; } - ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level, + ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, &reads, &writes, s->aw_bits); - if (ret_fr) { - ret_fr = -ret_fr; - if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { - trace_vtd_fault_disabled(); - } else { - vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); - } - goto error; - } + VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); page_mask = vtd_slpt_level_page_mask(level); access_flags = IOMMU_ACCESS_FLAG(reads, writes); - vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte, + vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte, access_flags, level); out: vtd_iommu_unlock(s); @@ -1573,7 +1879,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce) && - domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { + domain_id == vtd_get_domain_id(s, &ce)) { vtd_sync_shadow_page_table(vtd_as); } } @@ -1591,7 +1897,7 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce); - if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { + if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { if (vtd_as_has_map_notifier(vtd_as)) { /* * As long as we have MAP notifications registered in @@ -2629,6 +2935,7 @@ static const VMStateDescription vtd_vmstate = { VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), VMSTATE_BOOL(root_extended, IntelIOMMUState), + VMSTATE_BOOL(root_scalable, IntelIOMMUState), VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), VMSTATE_BOOL(qi_enabled, IntelIOMMUState), VMSTATE_BOOL(intr_enabled, IntelIOMMUState), @@ -3098,9 +3405,11 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) vtd_address_space_unmap(vtd_as, n); if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { - 
trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn), + trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : + "legacy mode", + bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn), - VTD_CONTEXT_ENTRY_DID(ce.hi), + vtd_get_domain_id(s, &ce), ce.hi, ce.lo); if (vtd_as_has_map_notifier(vtd_as)) { /* This is required only for MAP typed notifiers */ @@ -3110,10 +3419,10 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) .notify_unmap = false, .aw = s->aw_bits, .as = vtd_as, - .domain_id = VTD_CONTEXT_ENTRY_DID(ce.hi), + .domain_id = vtd_get_domain_id(s, &ce), }; - vtd_page_walk(&ce, 0, ~0ULL, &info); + vtd_page_walk(s, &ce, 0, ~0ULL, &info); } } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), @@ -3137,6 +3446,7 @@ static void vtd_init(IntelIOMMUState *s) s->root = 0; s->root_extended = false; + s->root_scalable = false; s->dmar_enabled = false; s->intr_enabled = false; s->iq_head = 0; @@ -3199,7 +3509,7 @@ static void vtd_init(IntelIOMMUState *s) vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); - vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0); + vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0); vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 00e9edbc66..fe72bc38f0 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -172,6 +172,7 @@ /* RTADDR_REG */ #define VTD_RTADDR_RTT (1ULL << 11) +#define VTD_RTADDR_SMT (1ULL << 10) #define VTD_RTADDR_ADDR_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) /* IRTA_REG */ @@ -294,6 +295,8 @@ typedef enum VTDFaultReason { * request while disabled */ VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */ + VTD_FR_PASID_TABLE_INV = 0x58, /* Invalid PASID table entry */ + /* This is not a normal fault reason. We use this to indicate some faults * that are not referenced by the VT-d specification. * Fault event with such reason should not be recorded.
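To illustrate the scalable-mode root-entry layout handled earlier in this patch (a sketch, not code from the patch): a device with devfn below 128 is looked up through the context-table pointer in the lower 64 bits of its root entry, a device with devfn 128 or above through the pointer in the upper 64 bits, and the context-table index is the devfn with bit 7 (VTD_DEVFN_CHECK_MASK, defined in the next hunk) masked off:

    uint8_t devfn = 0x83;                          /* device 16, function 3 */
    bool use_upper = devfn > UINT8_MAX / 2;        /* true: use re->hi */
    uint8_t index = devfn & ~VTD_DEVFN_CHECK_MASK; /* 0x03 */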
@@ -411,8 +414,8 @@ typedef struct VTDIOTLBPageInvInfo VTDIOTLBPageInvInfo; #define VTD_PAGE_MASK_1G (~((1ULL << VTD_PAGE_SHIFT_1G) - 1)) struct VTDRootEntry { - uint64_t val; - uint64_t rsvd; + uint64_t lo; + uint64_t hi; }; typedef struct VTDRootEntry VTDRootEntry; @@ -423,6 +426,8 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_ROOT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDRootEntry)) #define VTD_ROOT_ENTRY_RSVD(aw) (0xffeULL | ~VTD_HAW_MASK(aw)) +#define VTD_DEVFN_CHECK_MASK 0x80 + /* Masks for struct VTDContextEntry */ /* lo */ #define VTD_CONTEXT_ENTRY_P (1ULL << 0) @@ -441,6 +446,38 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_CONTEXT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDContextEntry)) +#define VTD_CTX_ENTRY_LEGACY_SIZE 16 +#define VTD_CTX_ENTRY_SCALABLE_SIZE 32 + +#define VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK 0xfffff +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(aw) (0x1e0ULL | ~VTD_HAW_MASK(aw)) +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 0xffffffffffe00000ULL + +/* PASID Table Related Definitions */ +#define VTD_PASID_DIR_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_DIR_ENTRY_SIZE 8 +#define VTD_PASID_ENTRY_SIZE 64 +#define VTD_PASID_DIR_BITS_MASK (0x3fffULL) +#define VTD_PASID_DIR_INDEX(pasid) (((pasid) >> 6) & VTD_PASID_DIR_BITS_MASK) +#define VTD_PASID_DIR_FPD (1ULL << 1) /* Fault Processing Disable */ +#define VTD_PASID_TABLE_BITS_MASK (0x3fULL) +#define VTD_PASID_TABLE_INDEX(pasid) ((pasid) & VTD_PASID_TABLE_BITS_MASK) +#define VTD_PASID_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */ + +/* PASID Granular Translation Type Mask */ +#define VTD_SM_PASID_ENTRY_PGTT (7ULL << 6) +#define VTD_SM_PASID_ENTRY_FLT (1ULL << 6) +#define VTD_SM_PASID_ENTRY_SLT (2ULL << 6) +#define VTD_SM_PASID_ENTRY_NESTED (3ULL << 6) +#define VTD_SM_PASID_ENTRY_PT (4ULL << 6) + +#define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */ +#define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK) + +/* Second Level Page Translation Pointer */ +#define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL) + /* Paging Structure common */ #define VTD_SL_PT_PAGE_SIZE_MASK (1ULL << 7) /* Bits to decide the offset for each level */ diff --git a/hw/i386/trace-events b/hw/i386/trace-events index 77244fc384..cae1b76fde 100644 --- a/hw/i386/trace-events +++ b/hw/i386/trace-events @@ -30,7 +30,7 @@ vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32 vtd_iotlb_cc_update(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen1, uint32_t gen2) "IOTLB context update bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32" -> gen %"PRIu32 vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)" vtd_fault_disabled(void) "Fault processing disabled for context entry" -vtd_replay_ce_valid(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 +vtd_replay_ce_valid(const char *mode, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "%s: replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8 vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64
vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIu16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d" diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index a321cc9691..72c5ca6a59 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -66,11 +66,20 @@ typedef struct VTDIOTLBEntry VTDIOTLBEntry; typedef struct VTDBus VTDBus; typedef union VTD_IR_TableEntry VTD_IR_TableEntry; typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress; +typedef struct VTDPASIDDirEntry VTDPASIDDirEntry; +typedef struct VTDPASIDEntry VTDPASIDEntry; /* Context-Entry */ struct VTDContextEntry { - uint64_t lo; - uint64_t hi; + union { + struct { + uint64_t lo; + uint64_t hi; + }; + struct { + uint64_t val[4]; + }; + }; }; struct VTDContextCacheEntry { @@ -81,6 +90,16 @@ struct VTDContextCacheEntry { struct VTDContextEntry context_entry; }; +/* PASID Directory Entry */ +struct VTDPASIDDirEntry { + uint64_t val; +}; + +/* PASID Table Entry */ +struct VTDPASIDEntry { + uint64_t val[8]; +}; + struct VTDAddressSpace { PCIBus *bus; uint8_t devfn; @@ -212,6 +231,7 @@ struct IntelIOMMUState { dma_addr_t root; /* Current root table pointer */ bool root_extended; /* Type of root table (extended or not) */ + bool root_scalable; /* Type of root table (scalable or not) */ bool dmar_enabled; /* Set if DMA remapping is enabled */ uint16_t iq_head; /* Current invalidation queue head */ -- cgit v1.2.3-55-g7522 From c0c1d351849bc249304e0c505e1597e570d1e777 Mon Sep 17 00:00:00 2001 From: Liu, Yi L Date: Tue, 5 Mar 2019 10:34:54 +0800 Subject: intel_iommu: add 256 bits qi_desc support Per Intel(R) VT-d 3.0, the qi_desc is 256 bits in Scalable Mode. This patch adds emulation of the 256-bit qi_desc. Signed-off-by: Liu, Yi L [Yi Sun is co-developer and rebased and refined the patch.] Signed-off-by: Yi Sun Reviewed-by: Peter Xu Message-Id: <1551753295-30167-3-git-send-email-yi.y.sun@linux.intel.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/i386/intel_iommu.c | 40 +++++++++++++++++++++++++++++----------- hw/i386/intel_iommu_internal.h | 9 ++++++++- include/hw/i386/intel_iommu.h | 1 + 3 files changed, 38 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index ea9ea93e42..06b48b3172 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -2005,7 +2005,7 @@ static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) if (en) { s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); /* 2^(x+8) entries */ - s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8); + s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); s->qi_enabled = true; trace_vtd_inv_qi_setup(s->iq, s->iq_size); /* Ok - report back to driver */ @@ -2172,19 +2172,24 @@ static void vtd_handle_iotlb_write(IntelIOMMUState *s) } /* Fetch an Invalidation Descriptor from the Invalidation Queue */ -static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset, +static bool vtd_get_inv_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { - dma_addr_t addr = base_addr + offset * sizeof(*inv_desc); - if (dma_memory_read(&address_space_memory, addr, inv_desc, - sizeof(*inv_desc))) { - error_report_once("Read INV DESC failed"); - inv_desc->lo = 0; - inv_desc->hi = 0; + dma_addr_t base_addr = s->iq; + uint32_t offset = s->iq_head; + uint32_t dw = s->iq_dw ?
32 : 16; + dma_addr_t addr = base_addr + offset * dw; + + if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { + error_report_once("Read INV DESC failed."); return false; } inv_desc->lo = le64_to_cpu(inv_desc->lo); inv_desc->hi = le64_to_cpu(inv_desc->hi); + if (dw == 32) { + inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); + inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); + } return true; } @@ -2390,10 +2395,11 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) uint8_t desc_type; trace_vtd_inv_qi_head(s->iq_head); - if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) { + if (!vtd_get_inv_desc(s, &inv_desc)) { s->iq_last_desc_type = VTD_INV_DESC_NONE; return false; } + desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; /* FIXME: should update at first or at last? */ s->iq_last_desc_type = desc_type; @@ -2478,7 +2484,12 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s) { uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); - s->iq_tail = VTD_IQT_QT(val); + if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { + error_report_once("%s: RSV bit is set: val=0x%"PRIx64, + __func__, val); + return; + } + s->iq_tail = VTD_IQT_QT(s->iq_dw, val); trace_vtd_inv_qi_tail(s->iq_tail); if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { @@ -2747,6 +2758,12 @@ static void vtd_mem_write(void *opaque, hwaddr addr, } else { vtd_set_quad(s, addr, val); } + if (s->ecap & VTD_ECAP_SMTS && + val & VTD_IQA_DW_MASK) { + s->iq_dw = true; + } else { + s->iq_dw = false; + } break; case DMAR_IQA_REG_HI: @@ -3455,6 +3472,7 @@ static void vtd_init(IntelIOMMUState *s) s->iq_size = 0; s->qi_enabled = false; s->iq_last_desc_type = VTD_INV_DESC_NONE; + s->iq_dw = false; s->next_frcd_reg = 0; s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | @@ -3532,7 +3550,7 @@ static void vtd_init(IntelIOMMUState *s) vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); - vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0); + vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0); vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index fe72bc38f0..016fa4c7c5 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -190,6 +190,7 @@ #define VTD_ECAP_EIM (1ULL << 4) #define VTD_ECAP_PT (1ULL << 6) #define VTD_ECAP_MHMV (15ULL << 20) +#define VTD_ECAP_SMTS (1ULL << 43) /* CAP_REG */ /* (offset >> 4) << 24 */ @@ -218,11 +219,14 @@ #define VTD_CAP_SAGAW_48bit (0x4ULL << VTD_CAP_SAGAW_SHIFT) /* IQT_REG */ -#define VTD_IQT_QT(val) (((val) >> 4) & 0x7fffULL) +#define VTD_IQT_QT(dw_bit, val) (dw_bit ? 
(((val) >> 5) & 0x3fffULL) : \ + (((val) >> 4) & 0x7fffULL)) +#define VTD_IQT_QT_256_RSV_BIT 0x10 /* IQA_REG */ #define VTD_IQA_IQA_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) #define VTD_IQA_QS 0x7ULL +#define VTD_IQA_DW_MASK 0x800 /* IQH_REG */ #define VTD_IQH_QH_SHIFT 4 @@ -324,6 +328,9 @@ union VTDInvDesc { uint64_t lo; uint64_t hi; }; + struct { + uint64_t val[4]; + }; union { VTDInvDescIEC iec; }; diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index 72c5ca6a59..2877c94ca2 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -238,6 +238,7 @@ struct IntelIOMMUState { uint16_t iq_tail; /* Current invalidation queue tail */ dma_addr_t iq; /* Current invalidation queue pointer */ uint16_t iq_size; /* IQ Size in number of entries */ + bool iq_dw; /* IQ descriptor width 256-bit or not */ bool qi_enabled; /* Set if the QI is enabled */ uint8_t iq_last_desc_type; /* The type of last completed descriptor */ -- cgit v1.2.3-55-g7522
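A worked example of the IQT tail decoding in the patch above (a sketch, not code from the patch): with 128-bit descriptors the queue tail occupies bits 18:4 of DMAR_IQT_REG, while with 256-bit descriptors bit 4 is reserved and the tail occupies bits 18:5, which is exactly what VTD_IQT_QT(dw_bit, val) computes:

    uint64_t val = 0x40;                      /* example raw DMAR_IQT_REG value */
    uint32_t tail_128 = (val >> 4) & 0x7fff;  /* 128-bit descriptors: tail index 4 */
    uint32_t tail_256 = (val >> 5) & 0x3fff;  /* 256-bit descriptors: tail index 2 */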
From 4a4f219e8a109b0859b23b1f9c8ad8bfbcb59ee1 Mon Sep 17 00:00:00 2001 From: Yi Sun Date: Tue, 5 Mar 2019 10:34:55 +0800 Subject: intel_iommu: add scalable-mode option to make scalable mode work This patch adds an option that gives the user the flexibility to expose Scalable Mode to the guest, with a config such as: "-device intel-iommu,caching-mode=on,scalable-mode=on" The Linux iommu driver already supports scalable mode; please refer to the patch set below: https://www.spinics.net/lists/kernel/msg2985279.html Signed-off-by: Liu, Yi L Signed-off-by: Yi Sun Message-Id: <1551753295-30167-4-git-send-email-yi.y.sun@linux.intel.com> Reviewed-by: Peter Xu Reviewed-by: Igor Mammedov Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/i386/intel_iommu.c | 25 +++++++++++++++++++++++++ hw/i386/intel_iommu_internal.h | 4 ++++ include/hw/i386/intel_iommu.h | 3 ++- 3 files changed, 31 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 06b48b3172..b90de6c664 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1710,6 +1710,9 @@ static void vtd_root_table_setup(IntelIOMMUState *s) { s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); s->root_extended = s->root & VTD_RTADDR_RTT; + if (s->scalable_mode) { + s->root_scalable = s->root & VTD_RTADDR_SMT; + } s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); trace_vtd_reg_dmar_root(s->root, s->root_extended); @@ -2419,6 +2422,17 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; + /* + * TODO: the bodies of the two cases below will be implemented in a future + * series. To make a guest whose iommu driver integrates the scalable mode + * support patch set work, just returning true is enough so far. + */ + case VTD_INV_DESC_PC: + break; + + case VTD_INV_DESC_PIOTLB: + break; + case VTD_INV_DESC_WAIT: trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo); if (!vtd_process_wait_desc(s, &inv_desc)) { @@ -2983,6 +2997,7 @@ static Property vtd_properties[] = { DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits, VTD_HOST_ADDRESS_WIDTH), DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), + DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), DEFINE_PROP_END_OF_LIST(), }; @@ -3518,6 +3533,11 @@ static void vtd_init(IntelIOMMUState *s) s->cap |= VTD_CAP_CM; } + /* TODO: read cap/ecap from host to decide which cap to be exposed. */ + if (s->scalable_mode) { + s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; + } + vtd_reset_caches(s); /* Define registers with default values and bit semantics */ @@ -3629,6 +3649,11 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) return false; } + if (s->scalable_mode && !s->dma_drain) { + error_setg(errp, "Need to set dma_drain for scalable mode"); + return false; + } + return true; } diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 016fa4c7c5..1160618177 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -190,7 +190,9 @@ #define VTD_ECAP_EIM (1ULL << 4) #define VTD_ECAP_PT (1ULL << 6) #define VTD_ECAP_MHMV (15ULL << 20) +#define VTD_ECAP_SRS (1ULL << 31) #define VTD_ECAP_SMTS (1ULL << 43) +#define VTD_ECAP_SLTS (1ULL << 46) /* CAP_REG */ /* (offset >> 4) << 24 */ @@ -345,6 +347,8 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_IEC 0x4 /* Interrupt Entry Cache Invalidate Descriptor */ #define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */ +#define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */ +#define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */ #define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */ /* Masks for Invalidation Wait Descriptor */ diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index 2877c94ca2..c11e3d5b34 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -227,7 +227,8 @@ struct IntelIOMMUState { uint8_t womask[DMAR_REG_SIZE]; /* WO (write only - read returns 0) */ uint32_t version; - bool caching_mode; /* RO - is cap CM enabled? */ + bool caching_mode; /* RO - is cap CM enabled? */ + bool scalable_mode; /* RO - is Scalable Mode supported? */ dma_addr_t root; /* Current root table pointer */ bool root_extended; /* Type of root table (extended or not) */ -- cgit v1.2.3-55-g7522 From 5ad204bf2a06bf29234df9e6f1ef6c566771b40c Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Thu, 28 Feb 2019 16:53:49 +0800 Subject: vhost-user: Support transferring inflight buffer between qemu and backend This patch introduces two new messages VHOST_USER_GET_INFLIGHT_FD and VHOST_USER_SET_INFLIGHT_FD to support transferring a shared buffer between qemu and backend. First, qemu uses VHOST_USER_GET_INFLIGHT_FD to get the shared buffer from the backend. Then qemu should send it back through VHOST_USER_SET_INFLIGHT_FD each time we start vhost-user. This shared buffer is used by the backend to track inflight I/O. Qemu should retrieve a new one when the vm is reset. Signed-off-by: Xie Yongji Signed-off-by: Chai Wen Signed-off-by: Zhang Yu Message-Id: <20190228085355.9614-2-xieyongji@baidu.com> Reviewed-by: Michael S.
Tsirkin Signed-off-by: Michael S. Tsirkin --- docs/interop/vhost-user.txt | 285 ++++++++++++++++++++++++++++++++++++++ hw/virtio/vhost-user.c | 107 ++++++++++++++ hw/virtio/vhost.c | 96 +++++++++++++ include/hw/virtio/vhost-backend.h | 10 ++ include/hw/virtio/vhost.h | 18 +++ 5 files changed, 516 insertions(+) (limited to 'include') diff --git a/docs/interop/vhost-user.txt b/docs/interop/vhost-user.txt index 9ee2a60cfb..4dbd530cb9 100644 --- a/docs/interop/vhost-user.txt +++ b/docs/interop/vhost-user.txt @@ -147,6 +147,17 @@ Depending on the request type, payload can be: Offset: a 64-bit offset of this area from the start of the supplied file descriptor + * Inflight description + ----------------------------------------------------- + | mmap size | mmap offset | num queues | queue size | + ----------------------------------------------------- + + mmap size: a 64-bit size of area to track inflight I/O + mmap offset: a 64-bit offset of this area from the start + of the supplied file descriptor + num queues: a 16-bit number of virtqueues + queue size: a 16-bit size of virtqueues + In QEMU the vhost-user message is implemented with the following struct: typedef struct VhostUserMsg { @@ -162,6 +173,7 @@ typedef struct VhostUserMsg { struct vhost_iotlb_msg iotlb; VhostUserConfig config; VhostUserVringArea area; + VhostUserInflight inflight; }; } QEMU_PACKED VhostUserMsg; @@ -180,6 +192,7 @@ the ones that do: * VHOST_USER_GET_PROTOCOL_FEATURES * VHOST_USER_GET_VRING_BASE * VHOST_USER_SET_LOG_BASE (if VHOST_USER_PROTOCOL_F_LOG_SHMFD) + * VHOST_USER_GET_INFLIGHT_FD (if VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD) [ Also see the section on REPLY_ACK protocol extension. ] @@ -193,6 +206,7 @@ in the ancillary data: * VHOST_USER_SET_VRING_CALL * VHOST_USER_SET_VRING_ERR * VHOST_USER_SET_SLAVE_REQ_FD + * VHOST_USER_SET_INFLIGHT_FD (if VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD) If Master is unable to send the full message or receives a wrong reply it will close the connection. An optional reconnection mechanism can be implemented. @@ -387,6 +401,256 @@ If VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD protocol feature is negotiated, slave can send file descriptors (at most 8 descriptors in each message) to master via ancillary data using this fd communication channel. +Inflight I/O tracking +--------------------- + +To support reconnecting after restart or crash, slave may need to resubmit +inflight I/Os. If virtqueue is processed in order, we can easily achieve +that by getting the inflight descriptors from descriptor table (split virtqueue) +or descriptor ring (packed virtqueue). However, it can't work when we process +descriptors out-of-order because some entries which store the information of +inflight descriptors in available ring (split virtqueue) or descriptor +ring (packed virtqueue) might be overridden by new entries. To solve this +problem, the slave needs to allocate an extra buffer to store the information of inflight +descriptors and share it with the master for persistence. VHOST_USER_GET_INFLIGHT_FD and +VHOST_USER_SET_INFLIGHT_FD are used to transfer this buffer between master +and slave. The format of this buffer is described below: + +------------------------------------------------------- +| queue0 region | queue1 region | ... | queueN region | +------------------------------------------------------- + +N is the number of available virtqueues. Slave could get it from the num queues +field of VhostUserInflight.
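Since the regions are laid out back to back and, given the single queue size field, are equally sized, the slave can locate the region of queue i directly. A minimal sketch (the helper name is hypothetical; it assumes mmap size divides evenly into num queues equal regions):

    static inline void *inflight_queue_region(void *addr, uint64_t mmap_size,
                                              uint16_t num_queues, uint16_t i)
    {
        uint64_t region_size = mmap_size / num_queues;   /* equal per-queue regions */
        return (char *)addr + (uint64_t)i * region_size; /* start of queue i's region */
    }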
+ +For split virtqueue, queue region can be implemented as: + +typedef struct DescStateSplit { + /* Indicate whether this descriptor is inflight or not. + * Only available for head-descriptor. */ + uint8_t inflight; + + /* Padding */ + uint8_t padding[5]; + + /* Maintain a list for the last batch of used descriptors. + * Only available when batching is used for submitting */ + uint16_t next; + + /* Used to preserve the order of fetching available descriptors. + * Only available for head-descriptor. */ + uint64_t counter; +} DescStateSplit; + +typedef struct QueueRegionSplit { + /* The feature flags of this region. Now it's initialized to 0. */ + uint64_t features; + + /* The version of this region. It's 1 currently. + * Zero value indicates an uninitialized buffer */ + uint16_t version; + + /* The size of DescStateSplit array. It's equal to the virtqueue + * size. Slave could get it from queue size field of VhostUserInflight. */ + uint16_t desc_num; + + /* The head of the list that tracks the last batch of used descriptors. */ + uint16_t last_batch_head; + + /* Store the idx value of used ring */ + uint16_t used_idx; + + /* Used to track the state of each descriptor in descriptor table */ + DescStateSplit desc[0]; +} QueueRegionSplit; + +To track inflight I/O, the queue region should be processed as follows: + +When receiving available buffers from the driver: + + 1. Get the next available head-descriptor index from available ring, i + + 2. Set desc[i].counter to the value of global counter + + 3. Increase global counter by 1 + + 4. Set desc[i].inflight to 1 + +When supplying used buffers to the driver: + + 1. Get corresponding used head-descriptor index, i + + 2. Set desc[i].next to last_batch_head + + 3. Set last_batch_head to i + + 4. Steps 1,2,3 may be performed repeatedly if batching is possible + + 5. Increase the idx value of used ring by the size of the batch + + 6. Set the inflight field of each DescStateSplit entry in the batch to 0 + + 7. Set used_idx to the idx value of used ring + +When reconnecting: + + 1. If the value of used_idx does not match the idx value of used ring (meaning + the inflight field of DescStateSplit entries in the last batch may be incorrect), + + (a) Subtract the value of used_idx from the idx value of used ring to get the + size of the last batch of DescStateSplit entries + + (b) Set the inflight field of each DescStateSplit entry to 0 in the last batch + list, which starts from last_batch_head + + (c) Set used_idx to the idx value of used ring + + 2. Resubmit inflight DescStateSplit entries in order of their counter value + +For packed virtqueue, queue region can be implemented as: + +typedef struct DescStatePacked { + /* Indicate whether this descriptor is inflight or not. + * Only available for head-descriptor. */ + uint8_t inflight; + + /* Padding */ + uint8_t padding; + + /* Link to the next free entry */ + uint16_t next; + + /* Link to the last entry of descriptor list. + * Only available for head-descriptor. */ + uint16_t last; + + /* The length of descriptor list. + * Only available for head-descriptor. */ + uint16_t num; + + /* Used to preserve the order of fetching available descriptors. + * Only available for head-descriptor. */ + uint64_t counter; + + /* The buffer id */ + uint16_t id; + + /* The descriptor flags */ + uint16_t flags; + + /* The buffer length */ + uint32_t len; + + /* The buffer address */ + uint64_t addr; +} DescStatePacked; + +typedef struct QueueRegionPacked { + /* The feature flags of this region. Now it's initialized to 0.
*/ + uint64_t features; + + /* The version of this region. It's 1 currently. + * Zero value indicates an uninitialized buffer */ + uint16_t version; + + /* The size of DescStatePacked array. It's equal to the virtqueue + * size. Slave could get it from queue size field of VhostUserInflight. */ + uint16_t desc_num; + + /* The head of free DescStatePacked entry list */ + uint16_t free_head; + + /* The old head of free DescStatePacked entry list */ + uint16_t old_free_head; + + /* The used index of descriptor ring */ + uint16_t used_idx; + + /* The old used index of descriptor ring */ + uint16_t old_used_idx; + + /* Device ring wrap counter */ + uint8_t used_wrap_counter; + + /* The old device ring wrap counter */ + uint8_t old_used_wrap_counter; + + /* Padding */ + uint8_t padding[7]; + + /* Used to track the state of each descriptor fetched from descriptor ring */ + DescStatePacked desc[0]; +} QueueRegionPacked; + +To track inflight I/O, the queue region should be processed as follows: + +When receiving available buffers from the driver: + + 1. Get the next available descriptor entry from descriptor ring, d + + 2. If d is head descriptor, + + (a) Set desc[old_free_head].num to 0 + + (b) Set desc[old_free_head].counter to the value of global counter + + (c) Increase global counter by 1 + + (d) Set desc[old_free_head].inflight to 1 + + 3. If d is last descriptor, set desc[old_free_head].last to free_head + + 4. Increase desc[old_free_head].num by 1 + + 5. Set desc[free_head].addr, desc[free_head].len, desc[free_head].flags, + desc[free_head].id to d.addr, d.len, d.flags, d.id + + 6. Set free_head to desc[free_head].next + + 7. If d is last descriptor, set old_free_head to free_head + +When supplying used buffers to the driver: + + 1. Get corresponding used head-descriptor entry from descriptor ring, d + + 2. Get corresponding DescStatePacked entry, e + + 3. Set desc[e.last].next to free_head + + 4. Set free_head to the index of e + + 5. Steps 1,2,3,4 may be performed repeatedly if batching is possible + + 6. Increase used_idx by the size of the batch and update used_wrap_counter if needed + + 7. Update d.flags + + 8. Set the inflight field of each head DescStatePacked entry in the batch to 0 + + 9. Set old_free_head, old_used_idx, old_used_wrap_counter to free_head, used_idx, + used_wrap_counter + +When reconnecting: + + 1. If used_idx does not match old_used_idx (meaning the inflight field of DescStatePacked + entries in the last batch may be incorrect), + + (a) Get the next descriptor ring entry through old_used_idx, d + + (b) Use old_used_wrap_counter to calculate the available flags + + (c) If d.flags is not equal to the calculated flags value (meaning the slave has + submitted the buffer to the guest driver before the crash, so it has to commit the + in-progress update), set old_free_head, old_used_idx, old_used_wrap_counter + to free_head, used_idx, used_wrap_counter + + 2. Set free_head, used_idx, used_wrap_counter to old_free_head, old_used_idx, + old_used_wrap_counter (roll back any in-progress update) + + 3. Set the inflight field of each DescStatePacked entry in free list to 0 + + 4.
Resubmit inflight DescStatePacked entries in order of their counter value + Protocol features ----------------- @@ -402,6 +666,7 @@ Protocol features #define VHOST_USER_PROTOCOL_F_CONFIG 9 #define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11 +#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12 Master message types -------------------- @@ -766,6 +1031,26 @@ Master message types was previously sent. The value returned is an error indication; 0 is success. + * VHOST_USER_GET_INFLIGHT_FD + Id: 31 + Equivalent ioctl: N/A + Master payload: inflight description + + When VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD protocol feature has been + successfully negotiated, this message is submitted by master to get + a shared buffer from slave. The shared buffer will be used to track + inflight I/O by slave. QEMU should retrieve a new one when vm reset. + + * VHOST_USER_SET_INFLIGHT_FD + Id: 32 + Equivalent ioctl: N/A + Master payload: inflight description + + When VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD protocol feature has been + successfully negotiated, this message is submitted by master to send + the shared inflight buffer back to slave so that slave could get + inflight I/O after a crash or restart. + Slave message types ------------------- diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 5df73405bc..553319c7ac 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -56,6 +56,7 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_CONFIG = 9, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_MAX }; @@ -93,6 +94,8 @@ typedef enum VhostUserRequest { VHOST_USER_POSTCOPY_ADVISE = 28, VHOST_USER_POSTCOPY_LISTEN = 29, VHOST_USER_POSTCOPY_END = 30, + VHOST_USER_GET_INFLIGHT_FD = 31, + VHOST_USER_SET_INFLIGHT_FD = 32, VHOST_USER_MAX } VhostUserRequest; @@ -151,6 +154,13 @@ typedef struct VhostUserVringArea { uint64_t offset; } VhostUserVringArea; +typedef struct VhostUserInflight { + uint64_t mmap_size; + uint64_t mmap_offset; + uint16_t num_queues; + uint16_t queue_size; +} VhostUserInflight; + typedef struct { VhostUserRequest request; @@ -173,6 +183,7 @@ typedef union { VhostUserConfig config; VhostUserCryptoSession session; VhostUserVringArea area; + VhostUserInflight inflight; } VhostUserPayload; typedef struct VhostUserMsg { @@ -1770,6 +1781,100 @@ static bool vhost_user_mem_section_filter(struct vhost_dev *dev, return result; } +static int vhost_user_get_inflight_fd(struct vhost_dev *dev, + uint16_t queue_size, + struct vhost_inflight *inflight) +{ + void *addr; + int fd; + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + VhostUserMsg msg = { + .hdr.request = VHOST_USER_GET_INFLIGHT_FD, + .hdr.flags = VHOST_USER_VERSION, + .payload.inflight.num_queues = dev->nvqs, + .payload.inflight.queue_size = queue_size, + .hdr.size = sizeof(msg.payload.inflight), + }; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { + return 0; + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (vhost_user_read(dev, &msg) < 0) { + return -1; + } + + if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) { + error_report("Received unexpected msg type. 
" + "Expected %d received %d", + VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); + return -1; + } + + if (msg.hdr.size != sizeof(msg.payload.inflight)) { + error_report("Received bad msg size."); + return -1; + } + + if (!msg.payload.inflight.mmap_size) { + return 0; + } + + fd = qemu_chr_fe_get_msgfd(chr); + if (fd < 0) { + error_report("Failed to get mem fd"); + return -1; + } + + addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, msg.payload.inflight.mmap_offset); + + if (addr == MAP_FAILED) { + error_report("Failed to mmap mem fd"); + close(fd); + return -1; + } + + inflight->addr = addr; + inflight->fd = fd; + inflight->size = msg.payload.inflight.mmap_size; + inflight->offset = msg.payload.inflight.mmap_offset; + inflight->queue_size = queue_size; + + return 0; +} + +static int vhost_user_set_inflight_fd(struct vhost_dev *dev, + struct vhost_inflight *inflight) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_INFLIGHT_FD, + .hdr.flags = VHOST_USER_VERSION, + .payload.inflight.mmap_size = inflight->size, + .payload.inflight.mmap_offset = inflight->offset, + .payload.inflight.num_queues = dev->nvqs, + .payload.inflight.queue_size = inflight->queue_size, + .hdr.size = sizeof(msg.payload.inflight), + }; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { + return 0; + } + + if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) { + return -1; + } + + return 0; +} + bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) { if (user->chr) { @@ -1829,4 +1934,6 @@ const VhostOps user_ops = { .vhost_crypto_create_session = vhost_user_crypto_create_session, .vhost_crypto_close_session = vhost_user_crypto_close_session, .vhost_backend_mem_section_filter = vhost_user_mem_section_filter, + .vhost_get_inflight_fd = vhost_user_get_inflight_fd, + .vhost_set_inflight_fd = vhost_user_set_inflight_fd, }; diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 311432f190..7f61018f2a 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1481,6 +1481,102 @@ void vhost_dev_set_config_notifier(struct vhost_dev *hdev, hdev->config_ops = ops; } +void vhost_dev_free_inflight(struct vhost_inflight *inflight) +{ + if (inflight->addr) { + qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); + inflight->addr = NULL; + inflight->fd = -1; + } +} + +static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, + uint64_t new_size) +{ + Error *err = NULL; + int fd = -1; + void *addr = qemu_memfd_alloc("vhost-inflight", new_size, + F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, + &fd, &err); + + if (err) { + error_report_err(err); + return -1; + } + + vhost_dev_free_inflight(inflight); + inflight->offset = 0; + inflight->addr = addr; + inflight->fd = fd; + inflight->size = new_size; + + return 0; +} + +void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + if (inflight->addr) { + qemu_put_be64(f, inflight->size); + qemu_put_be16(f, inflight->queue_size); + qemu_put_buffer(f, inflight->addr, inflight->size); + } else { + qemu_put_be64(f, 0); + } +} + +int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + uint64_t size; + + size = qemu_get_be64(f); + if (!size) { + return 0; + } + + if (inflight->size != size) { + if (vhost_dev_resize_inflight(inflight, size)) { + return -1; + } + } + inflight->queue_size = qemu_get_be16(f); + + qemu_get_buffer(f, inflight->addr, size); + + return 0; +} + +int vhost_dev_set_inflight(struct vhost_dev *dev, + struct 
vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { + r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + +int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, + struct vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_get_inflight_fd) { + r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + /* Host notifiers must be enabled at this point. */ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) { diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 81283ec50f..d6632a18e6 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -25,6 +25,7 @@ typedef enum VhostSetConfigType { VHOST_SET_CONFIG_TYPE_MIGRATION = 1, } VhostSetConfigType; +struct vhost_inflight; struct vhost_dev; struct vhost_log; struct vhost_memory; @@ -104,6 +105,13 @@ typedef int (*vhost_crypto_close_session_op)(struct vhost_dev *dev, typedef bool (*vhost_backend_mem_section_filter_op)(struct vhost_dev *dev, MemoryRegionSection *section); +typedef int (*vhost_get_inflight_fd_op)(struct vhost_dev *dev, + uint16_t queue_size, + struct vhost_inflight *inflight); + +typedef int (*vhost_set_inflight_fd_op)(struct vhost_dev *dev, + struct vhost_inflight *inflight); + typedef struct VhostOps { VhostBackendType backend_type; vhost_backend_init vhost_backend_init; @@ -142,6 +150,8 @@ typedef struct VhostOps { vhost_crypto_create_session_op vhost_crypto_create_session; vhost_crypto_close_session_op vhost_crypto_close_session; vhost_backend_mem_section_filter_op vhost_backend_mem_section_filter; + vhost_get_inflight_fd_op vhost_get_inflight_fd; + vhost_set_inflight_fd_op vhost_set_inflight_fd; } VhostOps; extern const VhostOps user_ops; diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index a7f449fa87..619498c8f4 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -7,6 +7,15 @@ #include "exec/memory.h" /* Generic structures common for any vhost based device. */ + +struct vhost_inflight { + int fd; + void *addr; + uint64_t size; + uint64_t offset; + uint16_t queue_size; +}; + struct vhost_virtqueue { int kick; int call; @@ -120,4 +129,13 @@ int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data, */ void vhost_dev_set_config_notifier(struct vhost_dev *dev, const VhostDevConfigOps *ops); + +void vhost_dev_reset_inflight(struct vhost_inflight *inflight); +void vhost_dev_free_inflight(struct vhost_inflight *inflight); +void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f); +int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f); +int vhost_dev_set_inflight(struct vhost_dev *dev, + struct vhost_inflight *inflight); +int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, + struct vhost_inflight *inflight); #endif -- cgit v1.2.3-55-g7522 From a1fe0b8f273c2272a7964655548179478b9d6a91 Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Thu, 28 Feb 2019 16:53:53 +0800 Subject: vhost-user-blk: Add support to get/set inflight buffer This patch adds support for the vhost-user-blk device to get and set the inflight buffer from/to the backend.
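A usage sketch, not taken from any hunk in this series: the vhost_dev_save_inflight()/vhost_dev_load_inflight() helpers above are not exercised by these patches, so the migration side is easiest to see in isolation. MyDev, its inflight field, and the handler names are illustrative assumptions; only the two vhost_dev_*_inflight() calls come from the code above.

static void my_dev_save_inflight(QEMUFile *f, void *opaque)
{
    MyDev *s = opaque;

    /* Streams a be64 size, a be16 queue_size and the raw region,
     * or a zero size when no inflight region is mapped. */
    vhost_dev_save_inflight(s->inflight, f);
}

static int my_dev_load_inflight(QEMUFile *f, void *opaque, int version_id)
{
    MyDev *s = opaque;

    /* Re-allocates the memfd-backed region if the incoming size
     * differs from the current one, then fills it from the stream. */
    return vhost_dev_load_inflight(s->inflight, f);
}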
Signed-off-by: Xie Yongji Signed-off-by: Zhang Yu Message-Id: <20190228085355.9614-6-xieyongji@baidu.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin --- hw/block/vhost-user-blk.c | 28 ++++++++++++++++++++++++++++ include/hw/virtio/vhost-user-blk.h | 1 + 2 files changed, 29 insertions(+) (limited to 'include') diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 767c734a81..28b81368f7 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -128,6 +128,21 @@ static void vhost_user_blk_start(VirtIODevice *vdev) } s->dev.acked_features = vdev->guest_features; + + if (!s->inflight->addr) { + ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight); + if (ret < 0) { + error_report("Error get inflight: %d", -ret); + goto err_guest_notifiers; + } + } + + ret = vhost_dev_set_inflight(&s->dev, s->inflight); + if (ret < 0) { + error_report("Error set inflight: %d", -ret); + goto err_guest_notifiers; + } + ret = vhost_dev_start(&s->dev, vdev); if (ret < 0) { error_report("Error starting vhost: %d", -ret); @@ -249,6 +264,13 @@ static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) } } +static void vhost_user_blk_reset(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + vhost_dev_free_inflight(s->inflight); +} + static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); @@ -283,6 +305,8 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) vhost_user_blk_handle_output); } + s->inflight = g_new0(struct vhost_inflight, 1); + s->dev.nvqs = s->num_queues; s->dev.vqs = g_new(struct vhost_virtqueue, s->dev.nvqs); s->dev.vq_index = 0; @@ -315,6 +339,7 @@ vhost_err: vhost_dev_cleanup(&s->dev); virtio_err: g_free(vqs); + g_free(s->inflight); virtio_cleanup(vdev); vhost_user_cleanup(&s->vhost_user); } @@ -327,7 +352,9 @@ static void vhost_user_blk_device_unrealize(DeviceState *dev, Error **errp) vhost_user_blk_set_status(vdev, 0); vhost_dev_cleanup(&s->dev); + vhost_dev_free_inflight(s->inflight); g_free(vqs); + g_free(s->inflight); virtio_cleanup(vdev); vhost_user_cleanup(&s->vhost_user); } @@ -372,6 +399,7 @@ static void vhost_user_blk_class_init(ObjectClass *klass, void *data) vdc->set_config = vhost_user_blk_set_config; vdc->get_features = vhost_user_blk_get_features; vdc->set_status = vhost_user_blk_set_status; + vdc->reset = vhost_user_blk_reset; } static const TypeInfo vhost_user_blk_info = { diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h index a8a106eecb..68634bee61 100644 --- a/include/hw/virtio/vhost-user-blk.h +++ b/include/hw/virtio/vhost-user-blk.h @@ -36,6 +36,7 @@ typedef struct VHostUserBlk { uint32_t queue_size; uint32_t config_wce; struct vhost_dev dev; + struct vhost_inflight *inflight; VhostUserState vhost_user; } VHostUserBlk; -- cgit v1.2.3-55-g7522 From db891a9ba3b5aef52c2735f9b907d4d3678a86f8 Mon Sep 17 00:00:00 2001 From: Knut Omang Date: Thu, 21 Feb 2019 19:13:22 +0100 Subject: pcie: Add a simple PCIe ACS (Access Control Services) helper function Implementing an ACS capability on downstream ports and multifunction endpoints indicates isolation and IOMMU visibility at a finer granularity. This creates smaller IOMMU groups in the guest and thus more flexibility in assigning endpoints to guest userspace or an L2 guest. Signed-off-by: Knut Omang Message-Id: <07489975121696f5573b0a92baaf3486ef51e35d.1550768238.git-series.knut.omang@oracle.com> Reviewed-by: Michael S.
Tsirkin Signed-off-by: Michael S. Tsirkin Reviewed-by: Alex Williamson --- hw/pci/pcie.c | 38 ++++++++++++++++++++++++++++++++++++++ include/hw/pci/pcie.h | 6 ++++++ include/hw/pci/pcie_regs.h | 4 ++++ 3 files changed, 48 insertions(+) (limited to 'include') diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 640f678773..cf1ca30f93 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -914,3 +914,41 @@ void pcie_ats_init(PCIDevice *dev, uint16_t offset) pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f); } + +/* ACS (Access Control Services) */ +void pcie_acs_init(PCIDevice *dev, uint16_t offset) +{ + bool is_downstream = pci_is_express_downstream_port(dev); + uint16_t cap_bits = 0; + + /* For endpoints, only multifunction devs may have an ACS capability: */ + assert(is_downstream || + (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) || + PCI_FUNC(dev->devfn)); + + pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset, + PCI_ACS_SIZEOF); + dev->exp.acs_cap = offset; + + if (is_downstream) { + /* + * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with + * caveats on the latter four that we ignore for simplicity). + * Endpoints may also implement a subset of ACS capabilities, + * but these are optional if the endpoint does not support + * peer-to-peer between functions and thus omitted here. + */ + cap_bits = PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT; + } + + pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits); + pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits); +} + +void pcie_acs_reset(PCIDevice *dev) +{ + if (dev->exp.acs_cap) { + pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0); + } +} diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 5b82a0d244..e30334d74d 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -79,6 +79,9 @@ struct PCIExpressDevice { /* Offset of ATS capability in config space */ uint16_t ats_cap; + + /* ACS */ + uint16_t acs_cap; }; #define COMPAT_PROP_PCP "power_controller_present" @@ -128,6 +131,9 @@ void pcie_add_capability(PCIDevice *dev, uint16_t offset, uint16_t size); void pcie_sync_bridge_lnk(PCIDevice *dev); +void pcie_acs_init(PCIDevice *dev, uint16_t offset); +void pcie_acs_reset(PCIDevice *dev); + void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn); void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num); void pcie_ats_init(PCIDevice *dev, uint16_t offset); diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index ad4e7808b8..1db86b0ec4 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -175,4 +175,8 @@ typedef enum PCIExpLinkWidth { PCI_ERR_COR_INTERNAL | \ PCI_ERR_COR_HL_OVERFLOW) +/* ACS */ +#define PCI_ACS_VER 0x1 +#define PCI_ACS_SIZEOF 8 + #endif /* QEMU_PCIE_REGS_H */ -- cgit v1.2.3-55-g7522 From e07fb4b50b629141eb1517002ccfa070dbdc1ea7 Mon Sep 17 00:00:00 2001 From: Knut Omang Date: Thu, 21 Feb 2019 19:13:23 +0100 Subject: gen_pcie_root_port: Add ACS (Access Control Services) capability Claim ACS support in the generic PCIe root port to allow passthrough of individual functions of a device to different guests (in a nested virt setting) with VFIO. Without this patch, all functions of a device, such as all VFs of an SR/IOV device, will end up in the same IOMMU group. A similar situation occurs on Windows with Hyper-V. In the single-function device case, it also has a small cosmetic benefit in that the root port itself is not grouped with the device. VFIO handles that situation, since its binding rules apply only to endpoints, so it does not limit passthrough in those cases.
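Only the downstream-port case is exercised by the hunks below; for the other case the assert in pcie_acs_init() admits, a multifunction endpoint, the wiring might look like the following sketch. The device type, the reset handler, and the 0x148 offset are illustrative assumptions; an endpoint receives the capability with no control bits set, the no-peer-to-peer case noted in the comment above.

#define MY_EP_ACS_OFFSET 0x148    /* hypothetical free extended-config offset */

static void my_ep_realize(PCIDevice *d, Error **errp)
{
    /* ... endpoint-specific setup elided ... */

    /* Passes the assert in pcie_acs_init() only if the device was
     * created with multifunction=on (QEMU_PCI_CAP_MULTIFUNCTION) or
     * sits on a nonzero function. */
    pcie_acs_init(d, MY_EP_ACS_OFFSET);
}

static void my_ep_reset(DeviceState *qdev)
{
    /* Clears the ACS control register, matching what rp_reset()
     * does for root ports below. */
    pcie_acs_reset(PCI_DEVICE(qdev));
}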
Signed-off-by: Knut Omang Reviewed-by: Marcel Apfelbaum Message-Id: <319460b483f566dd57487eb3dd340ed4c10aa53c.1550768238.git-series.knut.omang@oracle.com> Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S. Tsirkin Reviewed-by: Alex Williamson --- hw/pci-bridge/gen_pcie_root_port.c | 4 ++++ hw/pci-bridge/pcie_root_port.c | 4 ++++ include/hw/pci/pcie_port.h | 1 + 3 files changed, 9 insertions(+) (limited to 'include') diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 9766edb445..26bda73eae 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -20,6 +20,9 @@ OBJECT_CHECK(GenPCIERootPort, (obj), TYPE_GEN_PCIE_ROOT_PORT) #define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100 +#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \ + (GEN_PCIE_ROOT_PORT_AER_OFFSET + PCI_ERR_SIZEOF) + #define GEN_PCIE_ROOT_PORT_MSIX_NR_VECTOR 1 typedef struct GenPCIERootPort { @@ -149,6 +152,7 @@ static void gen_rp_dev_class_init(ObjectClass *klass, void *data) rpc->interrupts_init = gen_rp_interrupts_init; rpc->interrupts_uninit = gen_rp_interrupts_uninit; rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; + rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; } static const TypeInfo gen_rp_dev_info = { diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index 34ad76743c..e94d918b6d 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -47,6 +47,7 @@ static void rp_reset(DeviceState *qdev) pcie_cap_deverr_reset(d); pcie_cap_slot_reset(d); pcie_cap_arifwd_reset(d); + pcie_acs_reset(d); pcie_aer_root_reset(d); pci_bridge_reset(qdev); pci_bridge_disable_base_limit(d); @@ -106,6 +107,9 @@ static void rp_realize(PCIDevice *d, Error **errp) pcie_aer_root_init(d); rp_aer_vector_update(d); + if (rpc->acs_offset) { + pcie_acs_init(d, rpc->acs_offset); + } return; err: diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index df242a0caf..09586f4641 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -78,6 +78,7 @@ typedef struct PCIERootPortClass { int exp_offset; int aer_offset; int ssvid_offset; + int acs_offset; /* If nonzero, optional ACS capability offset */ int ssid; } PCIERootPortClass; -- cgit v1.2.3-55-g7522 From 9040e6dfa8c3fed87695a3de555d2c775727bb51 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 1 Mar 2019 11:35:48 +0800 Subject: i386, acpi: check acpi_memory_hotplug capacity in pre_plug Currently we do device realization as follows: hotplug_handler_pre_plug() dc->realize() hotplug_handler_plug() Before we do device realization and plug, we should allocate the necessary resources and check whether the memory-hotplug-support property is enabled. On piix4 and ich9, the memory-hotplug-support property is checked at the plug stage. This means that the device has already been realized and mapped into the guest address space by pc_dimm_plug() by the time the ACPI plug handler is called, where the request might fail and crash QEMU by reaching g_assert_not_reached() (piix4) or error_abort (ich9). Fix it by checking whether memory hotplug is enabled at the pre_plug stage, where the hotplug request can still be aborted gracefully. Signed-off-by: Wei Yang CC: Igor Mammedov CC: Eric Blake Signed-off-by: Wei Yang Message-Id: <20190301033548.6691-1-richardw.yang@linux.intel.com> Reviewed-by: Igor Mammedov Reviewed-by: Michael S. Tsirkin Signed-off-by: Michael S.
Tsirkin --- hw/acpi/ich9.c | 15 +++++++++++++-- hw/acpi/piix4.c | 13 ++++++++++--- hw/i386/pc.c | 2 ++ hw/isa/lpc_ich9.c | 1 + include/hw/acpi/ich9.h | 2 ++ 5 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index c5d8646abc..e53dfe1ee3 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -483,13 +483,24 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp) NULL); } +void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + !lpc->pm.acpi_memory_hotplug.is_enabled) + error_setg(errp, + "memory hotplug is not enabled: %s.memory-hotplug-support " + "is not set", object_get_typename(OBJECT(lpc))); +} + void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); - if (lpc->pm.acpi_memory_hotplug.is_enabled && - object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { nvdimm_acpi_plug_cb(hotplug_dev, dev); } else { diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index 7b98121070..9c079d6834 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -380,9 +380,17 @@ static void piix4_pm_powerdown_req(Notifier *n, void *opaque) static void piix4_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp); - } else if (!object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (!s->acpi_memory_hotplug.is_enabled) { + error_setg(errp, + "memory hotplug is not enabled: %s.memory-hotplug-support " + "is not set", object_get_typename(OBJECT(s))); + } + } else if ( !object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { error_setg(errp, "acpi: device pre plug request for not supported" " device type: %s", object_get_typename(OBJECT(dev))); @@ -394,8 +402,7 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev, { PIIX4PMState *s = PIIX4_PM(hotplug_dev); - if (s->acpi_memory_hotplug.is_enabled && - object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { nvdimm_acpi_plug_cb(hotplug_dev, dev); } else { diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 1cdaff5f4d..6077d27361 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -2090,6 +2090,8 @@ static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, return; } + hotplug_handler_pre_plug(pcms->acpi_dev, dev, errp); + if (is_nvdimm && !ms->nvdimms_state->is_enabled) { error_setg(errp, "nvdimm is not enabled: missing 'nvdimm' in '-M'"); return; diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index e692b9fdc1..ac44aa53be 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -805,6 +805,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data) * pc_q35_init() */ dc->user_creatable = false; + hc->pre_plug = ich9_pm_device_pre_plug_cb; hc->plug = ich9_pm_device_plug_cb; hc->unplug_request = ich9_pm_device_unplug_request_cb; hc->unplug = ich9_pm_device_unplug_cb; diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h index 59aeb06393..41568d1837 100644 --- a/include/hw/acpi/ich9.h 
+++ b/include/hw/acpi/ich9.h @@ -74,6 +74,8 @@ extern const VMStateDescription vmstate_ich9_pm; void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp); +void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp); void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev, -- cgit v1.2.3-55-g7522
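The hunks above move the capability check from plug time to pre_plug time, before dc->realize() has run, and the pattern generalizes to any hotplug handler that needs to refuse a request while the device is not yet guest-visible. A condensed sketch of that pattern, with MyPMState and the MY_PM() cast as illustrative stand-ins for the piix4/ich9 state types:

static void my_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    MyPMState *s = MY_PM(hotplug_dev);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
        !s->acpi_memory_hotplug.is_enabled) {
        /* Failing here is safe: the DIMM has not been realized or
         * mapped into the guest address space yet. */
        error_setg(errp, "memory hotplug is not enabled: "
                   "%s.memory-hotplug-support is not set",
                   object_get_typename(OBJECT(s)));
    }
}

static void my_pm_class_init(ObjectClass *klass, void *data)
{
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    hc->pre_plug = my_pm_device_pre_plug_cb; /* runs before dc->realize() */
    hc->plug = my_pm_device_plug_cb;         /* elided; may assume the check passed */
}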