Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--  kernel/dma/direct.c | 99
 1 file changed, 52 insertions(+), 47 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2c2772e9702a..59bdceea3737 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,14 +23,6 @@
#define ARCH_ZONE_DMA_BITS 24
#endif
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
- return sev_active();
-}
-
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
if (!dev->dma_mask) {
@@ -46,7 +38,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
{
- if (force_dma_unencrypted())
+ if (force_dma_unencrypted(dev))
return __phys_to_dma(dev, phys);
return phys_to_dma(dev, phys);
}
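
The removed helper above hard-coded the SEV check; the call sites now pass the device, so the check can be made per device. A minimal sketch of the replacement, assuming it still just wraps sev_active() (the in-tree definition lives outside direct.c, e.g. in the x86 mem_encrypt code):

    /* Sketch only: the real helper is defined outside this file; the
     * device argument leaves room for per-device policy later. */
    static inline bool force_dma_unencrypted(struct device *dev)
    {
            return sev_active();
    }
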
@@ -67,7 +59,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
dma_mask = dev->bus_dma_mask;
- if (force_dma_unencrypted())
+ if (force_dma_unencrypted(dev))
*phys_mask = __dma_to_phys(dev, dma_mask);
else
*phys_mask = dma_to_phys(dev, dma_mask);
@@ -96,8 +88,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- int page_order = get_order(size);
struct page *page = NULL;
u64 phys_mask;
@@ -109,20 +99,9 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_mask);
again:
- /* CMA can be used only in the context which permits sleeping */
- if (gfpflags_allow_blocking(gfp)) {
- page = dma_alloc_from_contiguous(dev, count, page_order,
- gfp & __GFP_NOWARN);
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
- }
- if (!page)
- page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
+ page = dma_alloc_contiguous(dev, size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- __free_pages(page, page_order);
+ dma_free_contiguous(dev, page, size);
page = NULL;
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -151,10 +130,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
if (!page)
return NULL;
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ /* remove any dirty cache lines on the kernel alias */
+ if (!PageHighMem(page))
+ arch_dma_prep_coherent(page, size);
+ /* return the page pointer as the opaque cookie */
+ return page;
+ }
+
if (PageHighMem(page)) {
/*
* Depending on the cma= arguments and per-arch setup
- * dma_alloc_from_contiguous could return highmem pages.
+ * dma_alloc_contiguous could return highmem pages.
* Without remapping there is no way to return them here,
* so log an error and fail.
*/
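
With DMA_ATTR_NO_KERNEL_MAPPING the returned CPU "address" is an opaque cookie (here the struct page pointer), so callers must never dereference it. A hypothetical driver-side sketch (example_alloc is a made-up name), assuming the standard dma_alloc_attrs()/dma_free_attrs() entry points:

    #include <linux/dma-mapping.h>

    /* Hypothetical example: allocate device-only memory, never map it. */
    static int example_alloc(struct device *dev, size_t size)
    {
            dma_addr_t dma;
            void *cookie;

            cookie = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
                                     DMA_ATTR_NO_KERNEL_MAPPING);
            if (!cookie)
                    return -ENOMEM;
            /* Program the device with "dma"; "cookie" is only kept for the free. */
            dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);
            return 0;
    }
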
@@ -164,22 +151,26 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
}
ret = page_address(page);
- if (force_dma_unencrypted()) {
+ if (force_dma_unencrypted(dev)) {
set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
*dma_handle = __phys_to_dma(dev, page_to_phys(page));
} else {
*dma_handle = phys_to_dma(dev, page_to_phys(page));
}
memset(ret, 0, size);
+
+ if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ dma_alloc_need_uncached(dev, attrs)) {
+ arch_dma_prep_coherent(page, size);
+ ret = uncached_kernel_address(ret);
+ }
+
return ret;
}
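
On architectures that select CONFIG_ARCH_HAS_UNCACHED_SEGMENT, the arch supplies uncached_kernel_address()/cached_kernel_address() to switch between the cached linear mapping and an uncached alias of the same pages, which is what the new branch above relies on. A purely illustrative sketch, assuming a fixed-offset uncached window (ARCH_UNCACHED_OFFSET is a made-up constant; real ports translate between their cached and uncached segments):

    /* Illustrative only: ARCH_UNCACHED_OFFSET is not a real kernel symbol. */
    void *uncached_kernel_address(void *addr)
    {
            return (void *)((unsigned long)addr + ARCH_UNCACHED_OFFSET);
    }

    void *cached_kernel_address(void *addr)
    {
            return (void *)((unsigned long)addr - ARCH_UNCACHED_OFFSET);
    }
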
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
+ dma_free_contiguous(dev, page, size);
}
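
Both the allocation and the release path now go through the dma_alloc_contiguous()/dma_free_contiguous() helpers instead of open-coding the CMA-then-page-allocator fallback. A rough sketch of the policy that moved into those helpers (illustrative, not the in-tree kernel/dma/contiguous.c code):

    /* Illustrative sketch of the factored-out allocation policy. */
    static struct page *alloc_contiguous_sketch(struct device *dev, size_t size,
                                                gfp_t gfp)
    {
            size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            struct page *page = NULL;

            /* CMA can be used only in a context that permits sleeping. */
            if (gfpflags_allow_blocking(gfp))
                    page = dma_alloc_from_contiguous(dev, count,
                                                     get_order(size),
                                                     gfp & __GFP_NOWARN);
            if (!page)
                    page = alloc_pages_node(dev_to_node(dev), gfp,
                                            get_order(size));
            return page;
    }
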
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
@@ -187,15 +178,26 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);
- if (force_dma_unencrypted())
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+ /* cpu_addr is a struct page cookie, not a kernel address */
+ __dma_direct_free_pages(dev, size, cpu_addr);
+ return;
+ }
+
+ if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+ if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ dma_alloc_need_uncached(dev, attrs))
+ cpu_addr = cached_kernel_address(cpu_addr);
__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}
void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev))
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ dma_alloc_need_uncached(dev, attrs))
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}
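
dma_direct_alloc() now only punts to arch_dma_alloc() when an uncached mapping is needed but the architecture has no uncached segment; dma_alloc_need_uncached() carries the "does this allocation need to be uncached" test. A plausible sketch of that predicate (the exact in-tree attribute checks may differ):

    /* Plausible sketch; the real predicate lives in a dma-noncoherent header. */
    static inline bool dma_alloc_need_uncached(struct device *dev,
                                               unsigned long attrs)
    {
            if (dev_is_dma_coherent(dev))
                    return false;
            if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                    return false;
            return true;
    }
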
@@ -203,7 +205,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
- if (!dev_is_dma_coherent(dev))
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ dma_alloc_need_uncached(dev, attrs))
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
else
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
@@ -231,12 +234,14 @@ void dma_direct_sync_sg_for_device(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i) {
- if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+ phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, sg->length,
dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ arch_sync_dma_for_device(dev, paddr, sg->length,
dir);
}
}
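
The scatterlist sync paths now translate sg_dma_address() back to a physical address instead of using sg_phys(): if SWIOTLB bounced the mapping, the DMA address refers to the bounce buffer, and that is what both is_swiotlb_buffer() and the cache maintenance must see. The per-buffer sync helper follows the same pattern; a condensed sketch (sync_one_for_device is a made-up name):

    /* Condensed sketch of the same translation for a single buffer. */
    static void sync_one_for_device(struct device *dev, dma_addr_t addr,
                                    size_t size, enum dma_data_direction dir)
    {
            phys_addr_t paddr = dma_to_phys(dev, addr);

            if (unlikely(is_swiotlb_buffer(paddr)))
                    swiotlb_tbl_sync_single(dev, paddr, size, dir,
                                            SYNC_FOR_DEVICE);
            if (!dev_is_dma_coherent(dev))
                    arch_sync_dma_for_device(dev, paddr, size, dir);
    }
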
@@ -268,11 +273,13 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i) {
+ phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
+
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-
- if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
- swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
+ arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
SYNC_FOR_CPU);
}
@@ -396,11 +403,9 @@ int dma_direct_supported(struct device *dev, u64 mask)
size_t dma_direct_max_mapping_size(struct device *dev)
{
- size_t size = SIZE_MAX;
-
/* If SWIOTLB is active, use its maximum mapping size */
- if (is_swiotlb_active())
- size = swiotlb_max_mapping_size(dev);
-
- return size;
+ if (is_swiotlb_active() &&
+ (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
+ return swiotlb_max_mapping_size(dev);
+ return SIZE_MAX;
}
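
dma_direct_max_mapping_size() now only advertises the SWIOTLB limit when bouncing can actually happen: the device cannot address all of memory, or swiotlb=force is in effect. A sketch of the addressing check, roughly comparing the device's effective DMA mask with what the platform requires (hedged; the in-tree helper sits in the dma-mapping headers):

    /* Sketch of the addressing test used above; not guaranteed to match
     * the exact in-tree definition. */
    static inline bool dma_addressing_limited_sketch(struct device *dev)
    {
            return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
                    dma_get_required_mask(dev);
    }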