path: root/drivers/iommu
author     Linus Torvalds  2017-07-07 04:20:54 +0200
committer  Linus Torvalds  2017-07-07 04:20:54 +0200
commit     f72e24a1240b78f421649c4d88f5c24ab1c896a1 (patch)
tree       90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /drivers/iommu
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s39... (diff)
parent     ARM: dma-mapping: Remove traces of NOMMU code (diff)
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem

  In this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code.

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
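The ->mapping_error switch described above works because the generic dma_mapping_error() helper dispatches the check to whatever dma_map_ops instance the device uses, so no global sentinel is needed. A paraphrased sketch of that dispatch (simplified from include/linux/dma-mapping.h as of this series, not part of the diff below):

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		debug_dma_mapping_error(dev, dma_addr);

		/* Each ops instance interprets its own private error value. */
		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);

		return 0;	/* ops without ->mapping_error never report a failure here */
	}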
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c    20
-rw-r--r--  drivers/iommu/dma-iommu.c    18
-rw-r--r--  drivers/iommu/intel-iommu.c   3
3 files changed, 28 insertions, 13 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5c9759ed22ca..f16d0f26ee24 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -54,6 +54,8 @@
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#define AMD_IOMMU_MAPPING_ERROR 0
+
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define LOOP_TIMEOUT 100000
@@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev,
paddr &= PAGE_MASK;
address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out;
prot = dir2prot(direction);
@@ -2431,7 +2433,7 @@ out_unmap:
dma_ops_free_iova(dma_dom, address, pages);
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
}
/*
@@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
else if (IS_ERR(domain))
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
dma_mask = *dev->dma_mask;
dma_dom = to_dma_ops_domain(domain);
@@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out_err;
prot = dir2prot(direction);
@@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
size, DMA_BIDIRECTIONAL, dma_mask);
- if (*dma_addr == DMA_ERROR_CODE)
+ if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
goto out_free;
return page_address(page);
@@ -2729,9 +2731,16 @@ free_mem:
*/
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
+ if (!x86_dma_supported(dev, mask))
+ return 0;
return check_device(dev);
}
+static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == AMD_IOMMU_MAPPING_ERROR;
+}
+
static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
@@ -2740,6 +2749,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
.map_sg = map_sg,
.unmap_sg = unmap_sg,
.dma_supported = amd_iommu_dma_supported,
+ .mapping_error = amd_iommu_mapping_error,
};
static int init_reserved_iova_ranges(void)
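With DMA_ERROR_CODE gone, AMD_IOMMU_MAPPING_ERROR stays private to amd_iommu_dma_ops and is only ever interpreted by its .mapping_error callback; consumers of the DMA API are unchanged. A minimal caller-side sketch (illustrative only, not from this patch; dev, buf and len are placeholders):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* never compare the handle against a magic constant */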
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 62618e77bedc..9403336f1fa6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -31,6 +31,8 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#define IOMMU_MAPPING_ERROR 0
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -500,7 +502,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
}
/**
@@ -533,7 +535,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -627,11 +629,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
return iova + iova_off;
}
@@ -671,7 +673,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
s->offset += s_iova_off;
s->length = s_length;
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
/*
@@ -714,11 +716,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_ERROR_CODE)
+ if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
}
@@ -836,7 +838,7 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == DMA_ERROR_CODE;
+ return dma_addr == IOMMU_MAPPING_ERROR;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8500deda9175..1e95475883cd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
+#ifdef CONFIG_X86
+ .dma_supported = x86_dma_supported,
+#endif
};
static inline int iommu_domain_cache_init(void)
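The x86_dma_supported() hook-ups above (here and in amd_iommu.c) stand in for the removed ->set_dma_mask method: mask changes now go through the common dma_set_mask(), which only consults ->dma_supported before storing the mask. A paraphrased sketch of that common path after the series (simplified, not part of this diff):

	int dma_set_mask(struct device *dev, u64 mask)
	{
		/* ->dma_supported (e.g. x86_dma_supported) decides whether the mask is usable. */
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;

		*dev->dma_mask = mask;	/* no per-ops ->set_dma_mask hook any more */
		return 0;
	}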