summary | refs | log | tree | commit | diff | stats
path: root/drivers/iommu
diff options
context:
space:
mode:
author: Robin Murphy, 2019-05-20 09:29:38 +0200
committer: Joerg Roedel, 2019-05-27 17:31:11 +0200
commit: 8680aa5a58abfe6087a3d8248c02232d3e05dc80 (patch)
tree: ed6ad2ecc0d2caad3dbda6d5410635a57658f7ac /drivers/iommu
parent: iommu/dma: Refactor iommu_dma_alloc (diff)
download: kernel-qcow2-linux-8680aa5a58abfe6087a3d8248c02232d3e05dc80.tar.gz
kernel-qcow2-linux-8680aa5a58abfe6087a3d8248c02232d3e05dc80.tar.xz
kernel-qcow2-linux-8680aa5a58abfe6087a3d8248c02232d3e05dc80.zip
iommu/dma: Don't remap CMA unnecessarily
Always remapping CMA allocations was largely a bodge to keep the freeing logic manageable when it was split between here and an arch wrapper. Now that it's all together and streamlined, we can relax that limitation.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dma-iommu.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4134f13b5529..cffd30810d41 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -973,7 +973,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
{
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
size_t iosize = size;
struct page *page;
void *addr;
@@ -1021,13 +1020,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (*handle == DMA_MAPPING_ERROR)
goto out_free_pages;
- addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
- __builtin_return_address(0));
- if (!addr)
- goto out_unmap;
+ if (!coherent || PageHighMem(page)) {
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
- if (!coherent)
- arch_dma_prep_coherent(page, iosize);
+ addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ goto out_unmap;
+
+ if (!coherent)
+ arch_dma_prep_coherent(page, iosize);
+ } else {
+ addr = page_address(page);
+ }
memset(addr, 0, size);
return addr;
out_unmap: