author     Thomas Gleixner  2017-11-23 16:29:05 +0100
committer  Thomas Gleixner  2017-11-23 16:29:05 +0100
commit     866c9b94ef968445c52214b3748ecc52a8491bca (patch)
tree       1fd073acb9be8e89e77b35c41e2964ac6feabee6 /drivers/infiniband/hw/mlx5
parent     timekeeping: Remove CONFIG_GENERIC_TIME_VSYSCALL_OLD (diff)
parent     treewide: Remove TIMER_FUNC_TYPE and TIMER_DATA_TYPE casts (diff)
Merge tag 'for-linus-timers-conversion-final-v4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into timers/urgent
Pull the last batch of manual timer conversions from Kees Cook:

- final batch of "non trivial" timer conversions (multi-tree dependencies, things Coccinelle couldn't handle, etc).
- treewide conversions via Coccinelle, in 4 steps:
  - DEFINE_TIMER() functions converted to struct timer_list * argument
  - init_timer() -> setup_timer()
  - setup_timer() -> timer_setup()
  - setup_timer() -> timer_setup() (with a single embedded structure)
- deprecated timer API removals (init_timer(), setup_*timer())
- finalization of new API (remove global casts)
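As a rough illustration of the setup_timer() -> timer_setup() step listed above: the timer callback no longer receives an unsigned long context cookie, but a struct timer_list pointer, and the enclosing structure is recovered with from_timer() (a container_of() wrapper). The sketch below is not code from this merge; struct my_dev, my_timer_fn() and my_dev_init() are made-up names used only to show the pattern.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list timer;	/* timer embedded in the owning structure */
	int fill_delay;
};

/* Old style (removed by this series):
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		dev->fill_delay = 0;
 *	}
 *	setup_timer(&dev->timer, my_timer_fn, (unsigned long)dev);
 */

/* New style: the callback gets the timer itself and derives the
 * containing structure from the embedded field name.
 */
static void my_timer_fn(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	dev->fill_delay = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	timer_setup(&dev->timer, my_timer_fn, 0);
	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(1000));
}

The delay_time_func() / mlx5_mr_cache_init() change in drivers/infiniband/hw/mlx5/mr.c further down is exactly this pattern, with the timer embedded as delay_timer in struct mlx5_ib_dev.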
Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c         4
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c        38
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c      57
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h   18
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c        10
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c        6
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c       149
7 files changed, 241 insertions, 41 deletions
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 3363e29157f6..fe269f680103 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -89,10 +89,6 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
resp.response_length = min_resp_len;
- err = ib_resolve_eth_dmac(pd->device, ah_attr);
- if (err)
- return ERR_PTR(err);
-
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2aa53f427685..18705cbcdc8c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -124,11 +124,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@@ -752,13 +754,13 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int err;
ucmdlen = udata->inlen < sizeof(ucmd) ?
- (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);
+ (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
if (ucmdlen == sizeof(ucmd) &&
- ucmd.reserved != 0)
+ (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
return -EINVAL;
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
@@ -802,8 +804,10 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
- if (unlikely((*cqe_size != 64) ||
- !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+ if (!((*cqe_size == 128 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
+ (*cqe_size == 64 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
err = -EOPNOTSUPP;
mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
*cqe_size);
@@ -826,6 +830,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
ilog2(ucmd.cqe_comp_res_format));
}
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
+ if (*cqe_size != 128 ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev,
+ "CQE padding is not supported for CQE size of %dB!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
+ }
+
return 0;
err_cqb:
@@ -985,7 +1002,10 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->cqe_size = cqe_size;
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
@@ -1129,6 +1149,9 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
+ if (cq_period > MLX5_MAX_CQ_PERIOD)
+ return -EINVAL;
+
err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
cq_period, cq_count);
if (err)
@@ -1335,7 +1358,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 552f7bd4ecc3..543d0a4c8bf3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -715,6 +715,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+ if (MLX5_CAP_GEN(mdev, end_pad))
+ props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -787,6 +790,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period =
+ MLX5_MAX_CQ_PERIOD;
+ }
+
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -824,8 +834,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), reserved, uhw->outlen))
- resp.response_length += sizeof(resp.reserved);
+ if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ resp.response_length += sizeof(resp.flags);
+
+ if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
+
+ if (MLX5_CAP_GEN(mdev, cqe_128_always))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+ }
if (field_avail(typeof(resp), sw_parsing_caps,
uhw->outlen)) {
@@ -848,6 +866,36 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ resp.response_length += sizeof(resp.striding_rq_caps);
+ if (MLX5_CAP_GEN(mdev, striding_rq)) {
+ resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (field_avail(typeof(resp), tunnel_offloads_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -3097,6 +3145,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = MLX5_IB_QPT_REG_UMR;
+ qp->send_cq = init_attr->send_cq;
+ qp->recv_cq = init_attr->recv_cq;
attr->qp_state = IB_QPS_INIT;
attr->port_num = 1;
@@ -3979,7 +4029,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 189e80cd6b2f..6dd8cac78de2 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -228,6 +228,7 @@ struct wr_list {
enum mlx5_ib_rq_flags {
MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
+ MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};
struct mlx5_ib_wq {
@@ -254,8 +255,14 @@ struct mlx5_ib_wq {
enum mlx5_ib_wq_flags {
MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+ MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+
struct mlx5_ib_rwq {
struct ib_wq ibwq;
struct mlx5_core_qp core_qp;
@@ -264,6 +271,9 @@ struct mlx5_ib_rwq {
u32 log_rq_size;
u32 rq_page_offset;
u32 log_page_size;
+ u32 log_num_strides;
+ u32 two_byte_shift_en;
+ u32 single_stride_log_num_of_bytes;
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
@@ -389,6 +399,7 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
u32 rate_limit;
u32 underlay_qpn;
+ bool tunnel_offload_en;
};
struct mlx5_ib_cq_buf {
@@ -411,6 +422,8 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_RSS = 1 << 8,
MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
MLX5_IB_QP_UNDERLAY = 1 << 10,
+ MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
+ MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
};
struct mlx5_umr_wr {
@@ -435,6 +448,10 @@ struct mlx5_shared_mr_info {
struct ib_umem *umem;
};
+enum mlx5_ib_cq_pr_flags {
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+};
+
struct mlx5_ib_cq {
struct ib_cq ibcq;
struct mlx5_core_cq mcq;
@@ -457,6 +474,7 @@ struct mlx5_ib_cq {
struct list_head wc_list;
enum ib_cq_notify_flags notify_flags;
struct work_struct notify_work;
+ u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};
struct mlx5_ib_wc {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 37bbc543847a..ee0ee1f9994b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -642,9 +642,9 @@ err:
return -ENOMEM;
}
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
dev->fill_delay = 0;
}
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
INIT_LIST_HEAD(&ent->head);
@@ -1230,13 +1230,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
- mlx5_ib_dbg(dev, "cache empty for order %d", order);
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
use_umr = false;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3d701c7a4c91..e2197bdda89c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -32,6 +32,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+#include <linux/kernel.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -929,9 +930,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT;
}
- if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
- sizeof(mlx5_ib_odp_opcode_cap[0]) ||
- !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
+ if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
+ !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
opcode);
return -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index acb79d3a4f1d..31ad28853efa 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1178,8 +1178,8 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode,
- MLX5_GET(qpc, qpc, end_padding_mode));
+ if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1204,8 +1204,16 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
+static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, u32 tdn)
+ struct mlx5_ib_rq *rq, u32 tdn,
+ bool tunnel_offload_en)
{
u32 *in;
void *tirc;
@@ -1221,6 +1229,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (tunnel_offload_en)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
@@ -1266,12 +1276,15 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
+ if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(dev, rq, tdn);
+ err = create_raw_packet_qp_tir(dev, rq, tdn,
+ qp->tunnel_offload_en);
if (err)
goto err_destroy_rq;
}
@@ -1358,7 +1371,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (udata->outlen < min_resp_len)
return -EINVAL;
- required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -1381,8 +1394,20 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
+ if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ mlx5_ib_dbg(dev, "invalid flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
+ !tunnel_offload_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
@@ -1405,6 +1430,15 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(tirc, tirc, transport_domain, tdn);
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
switch (ucmd.rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
@@ -1604,6 +1638,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
+ !tunnel_offload_supported(mdev)) {
+ mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->tunnel_offload_en = true;
+ }
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
@@ -1781,6 +1823,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
+ if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ } else {
+ qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
+ }
+ }
+
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1825,6 +1880,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
+err:
kvfree(in);
return err;
}
@@ -2283,8 +2339,12 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ if (qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT)
+ path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+ grh->sgid_index);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
@@ -3858,7 +3918,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned idx;
int err = 0;
- int inl = 0;
int num_sge;
void *seg;
int nreq;
@@ -4053,6 +4112,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
+ /* fall through */
case MLX5_IB_QPT_HW_GSI:
set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg);
@@ -4116,7 +4176,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
- inl = 1;
size += sz;
} else {
dpseg = seg;
@@ -4707,9 +4766,27 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, wq_type,
+ rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
+ if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ }
+ }
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ rwq->single_stride_log_num_of_bytes -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ }
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
@@ -4791,7 +4868,8 @@ static int prepare_user_rq(struct ib_pd *pd,
int err;
size_t required_cmd_sz;
- required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
+ + sizeof(ucmd.single_stride_log_num_of_bytes);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -4809,14 +4887,39 @@ static int prepare_user_rq(struct ib_pd *pd,
return -EFAULT;
}
- if (ucmd.comp_mask) {
+ if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
- }
-
- if (ucmd.reserved) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
- return -EOPNOTSUPP;
+ } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
+ if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
+ mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if ((ucmd.single_stride_log_num_of_bytes <
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
+ (ucmd.single_stride_log_num_of_bytes >
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
+ mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
+ ucmd.single_stride_log_num_of_bytes,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
+ return -EINVAL;
+ }
+ if ((ucmd.single_wqe_log_num_of_strides >
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (ucmd.single_wqe_log_num_of_strides <
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
+ mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ return -EINVAL;
+ }
+ rwq->single_stride_log_num_of_bytes =
+ ucmd.single_stride_log_num_of_bytes;
+ rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
+ rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
}
err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
@@ -5054,6 +5157,12 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, vsd,
(wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
}
+
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
}
if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {