Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--  include/linux/dma-mapping.h | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6309a721394b..f7d1eea32c78 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
+/**
+ * dma_addressing_limited - return whether the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+ dma_get_required_mask(dev);
+}
+
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
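
A minimal usage sketch for the helper added above, assuming a hypothetical driver probe function: dma_set_mask_and_coherent(), DMA_BIT_MASK() and dev_info() are existing kernel APIs; everything named example_* is illustrative only, not part of this patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	int ret;

	/* Request 32-bit addressing for streaming and coherent mappings. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * Even with the mask accepted, the effective mask (further
	 * capped by dev->bus_dma_mask) may not reach all of system
	 * memory; in that case transfers may be bounce buffered.
	 */
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing limited, bounce buffering possible\n");

	return 0;
}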
@@ -729,13 +743,6 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
-}
-#endif
-
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
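
For reference, the dma_max_pfn() helper removed in the second hunk computed the highest page frame number a device can reach via DMA. A sketch of the equivalent open-coded calculation, under the field names visible in the removed lines (the function name here is hypothetical):

static inline unsigned long example_dma_max_pfn(struct device *dev)
{
	/* Highest addressable DMA address, in page frames, plus the bus offset. */
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}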