path: root/drivers/net/ethernet/mellanox/mlx4/icm.h
author		Stephen Warren		2019-01-03 18:23:23 +0100
committer	David S. Miller		2019-01-07 14:14:17 +0100
commit		f65e192af35058e5c82da9e90871b472d24912bc (patch)
tree		b6caf85193c35b0a88a5a0fa8aa180d956e2eeea	/drivers/net/ethernet/mellanox/mlx4/icm.h
parent		ipv6: Take rcu_read_lock in __inet6_bind for mapped addresses (diff)
net/mlx4: Get rid of page operation after dma_alloc_coherent
This patch solves a crash at the time of mlx4 driver unload or system shutdown. The crash occurs because dma_alloc_coherent() returns one value in mlx4_alloc_icm_coherent(), but a different value is passed to dma_free_coherent() in mlx4_free_icm_coherent(). In turn, this is because the pointer is recorded at allocation time by passing it to sg_set_buf(), but at free time it is re-calculated by calling lowmem_page_address(sg_page()), which returns a different value. Solve this by recording the value that dma_alloc_coherent() returns, and passing that same value to dma_free_coherent().

This patch is roughly equivalent to commit 378efe798ecf ("RDMA/hns: Get rid of page operation after dma_alloc_coherent").

Based-on-code-from: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
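A minimal sketch of the corrected pairing described above (the helper names here are illustrative, not the real icm.c functions; the actual change lives in mlx4_alloc_icm_coherent()/mlx4_free_icm_coherent()):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
/* struct mlx4_icm_buf is the new type added to icm.h below */

/* Illustrative sketch only: record exactly what dma_alloc_coherent() returns
 * in the new struct mlx4_icm_buf, and free using those recorded values.
 */
static int icm_buf_alloc_coherent(struct device *dev, struct mlx4_icm_buf *buf,
				  size_t size, gfp_t gfp_mask)
{
	buf->addr = dma_alloc_coherent(dev, size, &buf->dma_addr, gfp_mask);
	if (!buf->addr)
		return -ENOMEM;
	buf->size = size;
	return 0;
}

static void icm_buf_free_coherent(struct device *dev, struct mlx4_icm_buf *buf)
{
	/* Pass back the recorded CPU address instead of re-deriving it via
	 * lowmem_page_address(sg_page()), which is what caused the crash.
	 */
	dma_free_coherent(dev, buf->size, buf->addr, buf->dma_addr);
}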
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/icm.h')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/icm.h	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
 	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
 };
 
+struct mlx4_icm_buf {
+	void			*addr;
+	size_t			size;
+	dma_addr_t		dma_addr;
+};
+
 struct mlx4_icm_chunk {
 	struct list_head	list;
 	int			npages;
 	int			nsg;
-	struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
+	bool			coherent;
+	union {
+		struct scatterlist	sg[MLX4_ICM_CHUNK_LEN];
+		struct mlx4_icm_buf	buf[MLX4_ICM_CHUNK_LEN];
+	};
 };
 
 struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
 
 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
 {
-	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+	if (iter->chunk->coherent)
+		return iter->chunk->buf[iter->page_idx].dma_addr;
+	else
+		return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
 }
 
 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
 {
-	return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+	if (iter->chunk->coherent)
+		return iter->chunk->buf[iter->page_idx].size;
+	else
+		return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
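As a usage note (not part of the patch): a hypothetical caller walking ICM pages through the existing iterator helpers in icm.h needs no changes, because mlx4_icm_addr() and mlx4_icm_size() now dispatch on chunk->coherent internally:

#include <linux/printk.h>
/* plus "icm.h" for the mlx4_icm types and iterator helpers */

/* Hypothetical debug helper; works for both coherent (buf[]) and
 * scatterlist (sg[]) chunks, assuming the existing mlx4_icm_first()/
 * mlx4_icm_last()/mlx4_icm_next() helpers in icm.h.
 */
static void icm_dump_pages(struct mlx4_icm *icm)
{
	struct mlx4_icm_iter iter;
	dma_addr_t dma;

	for (mlx4_icm_first(icm, &iter); !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		dma = mlx4_icm_addr(&iter);
		pr_debug("ICM page: dma %pad, len %lu\n",
			 &dma, mlx4_icm_size(&iter));
	}
}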