author		Lu Baolu	2019-06-12 02:28:51 +0200
committer	Joerg Roedel	2019-06-12 10:36:59 +0200
commit		123b2ffc376e1b3e9e015c75175b61e88a8b8518 (patch)
tree		53cc956263b3952a477dc210fe2e17e3573c7bce /drivers/iommu
parent		iommu/vt-d: Cleanup after delegating DMA domain to generic iommu (diff)
iommu/vt-d: Consolidate domain_init() to avoid duplication
The domain_init() and md_domain_init() functions do almost the same job. Consolidate them to avoid duplication.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
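After this change, domain_init() no longer takes a struct intel_iommu argument and no longer sets up the IOVA flush queue itself; the DMA path initializes the queue in find_or_alloc_domain() and falls back to strict invalidation if that fails. A minimal caller-side sketch of the new pattern, modeled on the hunks below (example_alloc_dma_domain() is a hypothetical wrapper used only for illustration, not a function in this patch):

/*
 * Sketch of the caller-side pattern after the consolidation, based on
 * find_or_alloc_domain() below. example_alloc_dma_domain() is
 * hypothetical and only illustrates the call order.
 */
static struct dmar_domain *example_alloc_dma_domain(int gaw)
{
	struct dmar_domain *domain;

	domain = alloc_domain(0);
	if (!domain)
		return NULL;

	/* Consolidated helper: no per-IOMMU argument any more. */
	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}

	/*
	 * Setting up the flush queue is now the caller's job; on failure,
	 * fall back to strict (synchronous) IOTLB invalidation.
	 */
	if (init_iova_flush_queue(&domain->iovad,
				  iommu_flush_iova, iova_entry_free)) {
		pr_warn("iova flush queue initialization failed\n");
		intel_iommu_strict = 1;
	}

	return domain;
}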
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/intel-iommu.c	123
1 file changed, 36 insertions(+), 87 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8c6ed39dec01..466129e7e50c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1841,63 +1841,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
	return agaw;
}

-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
-		       int guest_width)
-{
-	int adjust_width, agaw;
-	unsigned long sagaw;
-	int err;
-
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
-	err = init_iova_flush_queue(&domain->iovad,
-				    iommu_flush_iova, iova_entry_free);
-	if (err)
-		return err;
-
-	domain_reserve_special_ranges(domain);
-
-	/* calculate AGAW */
-	if (guest_width > cap_mgaw(iommu->cap))
-		guest_width = cap_mgaw(iommu->cap);
-	domain->gaw = guest_width;
-	adjust_width = guestwidth_to_adjustwidth(guest_width);
-	agaw = width_to_agaw(adjust_width);
-	sagaw = cap_sagaw(iommu->cap);
-	if (!test_bit(agaw, &sagaw)) {
-		/* hardware doesn't support it, choose a bigger one */
-		pr_debug("Hardware doesn't support agaw %d\n", agaw);
-		agaw = find_next_bit(&sagaw, 5, agaw);
-		if (agaw >= 5)
-			return -ENODEV;
-	}
-	domain->agaw = agaw;
-
-	if (ecap_coherent(iommu->ecap))
-		domain->iommu_coherency = 1;
-	else
-		domain->iommu_coherency = 0;
-
-	if (ecap_sc_support(iommu->ecap))
-		domain->iommu_snooping = 1;
-	else
-		domain->iommu_snooping = 0;
-
-	if (intel_iommu_superpage)
-		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-	else
-		domain->iommu_superpage = 0;
-
-	domain->nid = iommu->node;
-
-	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-	if (!domain->pgd)
-		return -ENOMEM;
-	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
-	return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
	struct page *freelist;
@@ -2578,6 +2521,31 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
	return 0;
}

+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+	int adjust_width;
+
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	domain->agaw = width_to_agaw(adjust_width);
+
+	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
+	domain->iommu_superpage = 0;
+	domain->max_addr = 0;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	if (!domain->pgd)
+		return -ENOMEM;
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
	struct device_domain_info *info;
@@ -2615,11 +2583,19 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
	domain = alloc_domain(0);
	if (!domain)
		return NULL;
-	if (domain_init(domain, iommu, gaw)) {
+
+	if (domain_init(domain, gaw)) {
		domain_exit(domain);
		return NULL;
	}
+	if (init_iova_flush_queue(&domain->iovad,
+				  iommu_flush_iova,
+				  iova_entry_free)) {
+		pr_warn("iova flush queue initialization failed\n");
+		intel_iommu_strict = 1;
+	}
+
out:
	return domain;
}
@@ -2724,8 +2700,6 @@ static int domain_prepare_identity_map(struct device *dev,
	return iommu_domain_identity_map(domain, start, end);
}

-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
static int __init si_domain_init(int hw)
{
	struct dmar_rmrr_unit *rmrr;
@@ -2736,7 +2710,7 @@ static int __init si_domain_init(int hw)
	if (!si_domain)
		return -EFAULT;

-	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		domain_exit(si_domain);
		return -EFAULT;
	}
@@ -4865,31 +4839,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
-	int adjust_width;
-
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	domain_reserve_special_ranges(domain);
-
-	/* calculate AGAW */
-	domain->gaw = guest_width;
-	adjust_width = guestwidth_to_adjustwidth(guest_width);
-	domain->agaw = width_to_agaw(adjust_width);
-
-	domain->iommu_coherency = 0;
-	domain->iommu_snooping = 0;
-	domain->iommu_superpage = 0;
-	domain->max_addr = 0;
-
-	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-	if (!domain->pgd)
-		return -ENOMEM;
-	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
-	return 0;
-}
-
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
@@ -4904,7 +4853,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
pr_err("Can't allocate dmar_domain\n");
return NULL;
}
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
pr_err("Domain initialization failed\n");
domain_exit(dmar_domain);
return NULL;