author	Robin Murphy	2019-07-29 17:32:38 +0200
committer	Joerg Roedel	2019-08-06 17:22:49 +0200
commit	8af23fad626173eed7cc02733874d4124049bd5e (patch)
tree	33b648afb7bc141828ed9382dd5d0f13a8ab362e /drivers/iommu
parent	Linux 5.3-rc3 (diff)
iommu/dma: Handle MSI mappings separately
MSI pages must always be mapped into a device's *current* domain, which *might* be the default DMA domain, but might instead be a VFIO domain with its own MSI cookie. This subtlety got accidentally lost in the streamlining of __iommu_dma_map(), but rather than reintroduce more complexity and/or special-casing, it turns out neater to just split this path out entirely.

Since iommu_dma_get_msi_page() already duplicates much of what __iommu_dma_map() does, it can easily just make the allocation and mapping calls directly as well. That way we can further streamline the helper back to exclusively operating on DMA domains.

Fixes: b61d271e59d7 ("iommu/dma: Move domain lookup into __iommu_dma_{map,unmap}")
Reported-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
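[Illustrative note, not part of the commit message.] The fix hinges on which domain receives the mapping: __iommu_dma_map() always resolves the default DMA domain via iommu_get_dma_domain(), whereas the MSI doorbell must land in whatever domain the device is currently attached to. Below is a minimal sketch of the allocate-and-map logic this patch moves into iommu_dma_get_msi_page(), written as if it lived inside dma-iommu.c (iommu_dma_alloc_iova()/iommu_dma_free_iova() are internal helpers of that file); the name msi_map_sketch is hypothetical, and the real helper takes the domain from its caller rather than looking it up itself.

/* Hypothetical sketch, not the patched kernel code. */
static dma_addr_t msi_map_sketch(struct device *dev, phys_addr_t msi_addr,
				 size_t size, int prot)
{
	/* The domain the device is attached to *now*, possibly a VFIO domain. */
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	dma_addr_t iova;

	/* Allocate an IOVA from this domain's own cookie... */
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	/* ...and install the mapping in this domain's page tables. */
	if (iommu_map(domain, iova, msi_addr, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}

	return iova;
}

Routing the MSI page through __iommu_dma_map() instead would install it in the default DMA domain even when the device is attached to a different one, which is exactly the regression described above.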
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/dma-iommu.c	17
1 file changed, 10 insertions, 7 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..6441197a75ea 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	size_t iova_off = 0;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-		iova_off = iova_offset(&cookie->iovad, phys);
-		size = iova_align(&cookie->iovad, size + iova_off);
-	}
+	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot);
-	if (iova == DMA_MAPPING_ERROR)
+	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	if (!iova)
 		goto out_free_page;
 
+	if (iommu_map(domain, iova, msi_addr, size, prot))
+		goto out_free_iova;
+
 	INIT_LIST_HEAD(&msi_page->list);
 	msi_page->phys = msi_addr;
 	msi_page->iova = iova;
 	list_add(&msi_page->list, &cookie->msi_page_list);
 	return msi_page;
 
+out_free_iova:
+	iommu_dma_free_iova(cookie, iova, size);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
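[Illustrative note, not part of the patch.] For reference, iommu_dma_get_msi_page() is reached from the MSI preparation path, which looks up the device's current domain and passes it in. A rough sketch of that calling pattern follows; prepare_msi_sketch is a hypothetical name, and the real upstream caller additionally serialises concurrent preparers and stashes the result in the MSI descriptor.

/* Hypothetical sketch of the calling context, simplified. */
static int prepare_msi_sketch(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	/* Current domain: the default DMA domain *or* e.g. a VFIO domain. */
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;

	/* No IOMMU-managed cookie on this domain: nothing to map. */
	if (!domain || !domain->iova_cookie)
		return 0;

	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	return msi_page ? 0 : -ENOMEM;
}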