Diffstat (limited to 'include/linux/dma-mapping.h')
 include/linux/dma-mapping.h | 46 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 7653ea66874d..34fe8463d10e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,7 +9,6 @@
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
@@ -127,6 +126,8 @@ struct dma_map_ops {
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -135,7 +136,7 @@ struct dma_map_ops {
int is_phys;
};
-extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
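As context for the mask macro above: a driver typically advertises its addressing width with DMA_BIT_MASK() at probe time. A minimal, hypothetical sketch (the function name is illustrative, dev comes from the driver's probe path):

	#include <linux/dma-mapping.h>

	/* Claim 32-bit addressing for both streaming and coherent DMA;
	 * this fails if dma_supported() rejects the mask. */
	static int example_probe_set_mask(struct device *dev)
	{
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}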
@@ -230,7 +231,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
@@ -263,11 +263,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
+ int ents;
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
@@ -297,7 +294,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
@@ -437,6 +433,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->cache_sync)
+ ops->cache_sync(dev, vaddr, size, dir);
+}
+
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
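The new ->cache_sync hook backs dma_cache_sync(), which drivers call on memory allocated with DMA_ATTR_NON_CONSISTENT to publish CPU writes before the device reads them. A hedged sketch (function name and fill pattern are illustrative only):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static void example_fill_and_sync(struct device *dev, size_t size)
	{
		dma_addr_t dma_handle;
		void *buf;

		/* possibly non-coherent memory: CPU caches may hold dirty data */
		buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
				      DMA_ATTR_NON_CONSISTENT);
		if (!buf)
			return;

		memset(buf, 0xA5, size);	/* CPU writes the buffer... */
		dma_cache_sync(dev, buf, size, DMA_TO_DEVICE); /* ...then flushes */

		/* the device may now read the buffer via dma_handle */

		dma_free_attrs(dev, size, buf, dma_handle,
			       DMA_ATTR_NON_CONSISTENT);
	}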
@@ -506,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
void *cpu_addr;
BUG_ON(!ops);
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
+ /*
+ * Let the implementation decide on the zone to allocate from, and
+ * decide on the way of zeroing the memory given that the memory
+ * returned should always be zeroed.
+ */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
+
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
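The effect of the flag-clearing hunk above is that callers no longer pick the allocation zone or request zeroing themselves; plain GFP_KERNEL suffices and the memory comes back zeroed either way. An illustrative caller (names hypothetical):

	#include <linux/dma-mapping.h>

	/* No __GFP_DMA/__GFP_DMA32/__GFP_HIGHMEM/__GFP_ZERO here: the core
	 * strips them and the returned memory is zeroed regardless. */
	static void *example_alloc(struct device *dev, size_t size,
				   dma_addr_t *handle)
	{
		return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
	}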
@@ -561,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+/*
+ * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
+ * don't use this in new code.
+ */
+#ifndef arch_dma_supported
+#define arch_dma_supported(dev, mask) (1)
+#endif
+
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -573,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
+ if (!arch_dma_supported(dev, mask))
+ return 0;
+
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
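For reference, an architecture opts in by defining arch_dma_supported before this header's #ifndef fallback is seen. A sketch loosely modeled on the x86 forbid_dac case the comment mentions; the forbid_64bit flag is hypothetical:

	/* In the arch's <asm/dma-mapping.h>, ahead of <linux/dma-mapping.h>: */
	extern int forbid_64bit;	/* hypothetical, akin to x86 forbid_dac */

	static inline int arch_dma_supported(struct device *dev, u64 mask)
	{
		/* veto >32-bit (dual-address-cycle style) masks if forbidden */
		if (forbid_64bit && mask > DMA_BIT_MASK(32))
			return 0;
		return 1;
	}
	#define arch_dma_supported arch_dma_supported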
@@ -685,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
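To see why the dma_pfn_offset term is added: on buses whose DMA addresses are offset from CPU physical addresses, the mask bounds bus PFNs, not CPU PFNs. A worked example with assumed values:

	/* Assume PAGE_SHIFT == 12, *dev->dma_mask == DMA_BIT_MASK(32), and a
	 * bus that maps DMA address 0 to CPU physical 0x80000000, i.e.
	 * dev->dma_pfn_offset == 0x80000:
	 *
	 *   *dev->dma_mask >> PAGE_SHIFT  = 0x000fffff  (highest bus PFN)
	 *   + dev->dma_pfn_offset         = 0x00080000
	 *   dma_max_pfn(dev)              = 0x0017ffff  (highest CPU PFN)
	 *
	 * The old form under-reported the limit by the offset, which matters
	 * to callers such as the block layer's bounce-buffer logic.
	 */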
@@ -697,7 +723,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
return ret;
}
-#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
@@ -705,7 +730,6 @@ static inline int dma_get_cache_alignment(void)
#endif
return 1;
}
-#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE 0x01
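With the CONFIG_HAS_DMA guard removed above, dma_get_cache_alignment() is available unconditionally. A hypothetical use, padding an allocation so it never shares a cache line with unrelated data:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	static void *example_alloc_padded(size_t size)
	{
		/* round the size up to the DMA cache-line granularity */
		return kmalloc(ALIGN(size, dma_get_cache_alignment()),
			       GFP_KERNEL);
	}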