author     Christoph Hellwig <hch@lst.de>  2019-06-14 16:17:27 +0200
committer  Christoph Hellwig <hch@lst.de>  2019-06-25 14:28:05 +0200
commit     d98849aff87911013aadb730138ab728b52fc547 (patch)
tree       14dbf44779970bf7754648fb04c5b2c9bf40ba6f /kernel/dma/direct.c
parent     dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code (diff)
download   kernel-qcow2-linux-d98849aff87911013aadb730138ab728b52fc547.tar.gz
           kernel-qcow2-linux-d98849aff87911013aadb730138ab728b52fc547.tar.xz
           kernel-qcow2-linux-d98849aff87911013aadb730138ab728b52fc547.zip
dma-direct: handle DMA_ATTR_NO_KERNEL_MAPPING in common code
DMA_ATTR_NO_KERNEL_MAPPING is generally implemented by allocating normal
cacheable pages or CMA memory, and then returning the page pointer as the
opaque handle.  Lift that code from the xtensa and generic dma remapping
implementations into the generic dma-direct code so that we don't even
call arch_dma_alloc for these allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
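For context, a minimal sketch of how a driver might use this attribute
through the generic DMA API. The wrapper names and surrounding driver
structure are illustrative, not part of this commit; only dma_alloc_attrs(),
dma_free_attrs(), and DMA_ATTR_NO_KERNEL_MAPPING are real API:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: with DMA_ATTR_NO_KERNEL_MAPPING the returned
 * "cpu address" is an opaque cookie (after this commit, a struct page
 * pointer in the dma-direct case) and must never be dereferenced by
 * the CPU.
 */
static void *example_alloc_device_only(struct device *dev, size_t size,
				       dma_addr_t *dma_handle)
{
	void *cookie = dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
				       DMA_ATTR_NO_KERNEL_MAPPING);

	/* the device may DMA to/from *dma_handle; the CPU never touches it */
	return cookie;
}

static void example_free_device_only(struct device *dev, size_t size,
				     void *cookie, dma_addr_t dma_handle)
{
	/* the same opaque cookie and attrs must be passed back on free */
	dma_free_attrs(dev, size, cookie, dma_handle,
		       DMA_ATTR_NO_KERNEL_MAPPING);
}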
Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--  kernel/dma/direct.c | 14 ++++++++++++++

1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index fc354f4f490b..b90e1aede743 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -138,6 +138,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		/* remove any dirty cache lines on the kernel alias */
+		if (!PageHighMem(page))
+			arch_dma_prep_coherent(page, size);
+		/* return the page pointer as the opaque cookie */
+		return page;
+	}
+
 	if (PageHighMem(page)) {
 		/*
 		 * Depending on the cma= arguments and per-arch setup
@@ -178,6 +186,12 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		/* cpu_addr is a struct page cookie, not a kernel address */
+		__dma_direct_free_pages(dev, size, cpu_addr);
+		return;
+	}
+
 	if (force_dma_unencrypted())
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
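Note that the alloc-side hunk flushes the kernel alias only for lowmem
pages: a highmem page has no permanent kernel mapping, so there is no
alias that could hold dirty cache lines. As a rough sketch of what an
architecture's arch_dma_prep_coherent() hook (declared in
<linux/dma-noncoherent.h> in kernels of this vintage) typically does;
the arch_wb_inval_range() helper below is hypothetical, since every
architecture uses its own cache-maintenance primitives:

#include <linux/mm.h>
#include <linux/dma-noncoherent.h>

/* hypothetical arch cache helper, for illustration only */
void arch_wb_inval_range(void *start, void *end);

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *alias = page_address(page);	/* kernel direct-map alias */

	/*
	 * Write back and invalidate the alias so that no dirty line is
	 * evicted later on top of data the device has DMA'd into the
	 * buffer.
	 */
	arch_wb_inval_range(alias, alias + size);
}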