Diffstat (limited to 'drivers/infiniband/hw/bnxt_re')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/Kconfig       |   1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/Makefile      |   2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h     |   5
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 114
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.h |  62
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c    | 107
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h    |   3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c        | 168
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c    | 486
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h    |  29
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c  |  26
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h  |  10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c   |  10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h   |   2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c    |  77
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h    |   2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h    |   4
17 files changed, 875 insertions(+), 233 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 19982a4a9bba..18f5ed082f41 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ depends on MAY_USE_DEVLINK
select NET_VENDOR_BROADCOM
select BNXT
---help---
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
index 036f84efbc73..afbaa0e20670 100644
--- a/drivers/infiniband/hw/bnxt_re/Makefile
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -3,4 +3,4 @@ ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
qplib_res.o qplib_rcfw.o \
- qplib_sp.o qplib_fp.o
+ qplib_sp.o qplib_fp.o hw_counters.o
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 85527532c49d..b3ad37fec578 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -85,7 +85,7 @@ struct bnxt_re_sqp_entries {
};
#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 16
+#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
@@ -116,7 +116,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq;
+ struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
@@ -140,6 +140,7 @@ struct bnxt_re_dev {
struct bnxt_re_qp *qp1_sqp;
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
+ atomic_t nq_alloc_cnt;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
new file mode 100644
index 000000000000..7b28219eba46
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -0,0 +1,114 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/prefetch.h>
+#include <linux/delay.h>
+
+#include <rdma/ib_addr.h>
+
+#include "bnxt_ulp.h"
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "hw_counters.h"
+
+static const char * const bnxt_re_stat_name[] = {
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+};
+
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+
+ if (!port || !stats)
+ return -EINVAL;
+
+ stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->qp_count);
+ stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->srq_count);
+ stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
+ stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
+ stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
+ if (bnxt_re_stats) {
+ stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
+ le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_PKTS] =
+ le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
+ stats->value[BNXT_RE_RX_BYTES] =
+ le64_to_cpu(bnxt_re_stats->rx_ucast_bytes);
+ stats->value[BNXT_RE_TX_PKTS] =
+ le64_to_cpu(bnxt_re_stats->tx_ucast_pkts);
+ stats->value[BNXT_RE_TX_BYTES] =
+ le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
+ }
+ return ARRAY_SIZE(bnxt_re_stat_name);
+}
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
+	/* We support only per-port stats */
+ if (!port_num)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
+ ARRAY_SIZE(bnxt_re_stat_name),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
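
Note: the two callbacks above implement the rdma_hw_stats contract — the IB core allocates the per-port counter array once via alloc_hw_stats() and later refreshes it via get_hw_stats(), which returns the number of valid entries (surfaced under /sys/class/infiniband/<dev>/ports/<N>/hw_counters/). A minimal sketch of that calling sequence, with the core's bookkeeping elided (illustrative only; the demo function name is hypothetical):

	static void bnxt_re_stats_demo(struct ib_device *ibdev, u8 port)
	{
		struct rdma_hw_stats *stats;
		int i, num;

		/* One allocation per port, sized from bnxt_re_stat_name[] */
		stats = bnxt_re_ib_alloc_hw_stats(ibdev, port);
		if (!stats)
			return;

		/* Refresh and print every counter the driver reported */
		num = bnxt_re_ib_get_hw_stats(ibdev, stats, port, 0);
		for (i = 0; i < num; i++)
			pr_info("%s = %llu\n", stats->names[i], stats->value[i]);

		kfree(stats);	/* the core normally owns this lifetime */
	}
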
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
new file mode 100644
index 000000000000..be0dc0093b58
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -0,0 +1,62 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics (header)
+ *
+ */
+
+#ifndef __BNXT_RE_HW_STATS_H__
+#define __BNXT_RE_HW_STATS_H__
+
+enum bnxt_re_hw_stats {
+ BNXT_RE_ACTIVE_QP,
+ BNXT_RE_ACTIVE_SRQ,
+ BNXT_RE_ACTIVE_CQ,
+ BNXT_RE_ACTIVE_MR,
+ BNXT_RE_ACTIVE_MW,
+ BNXT_RE_RX_PKTS,
+ BNXT_RE_RX_BYTES,
+ BNXT_RE_TX_PKTS,
+ BNXT_RE_TX_BYTES,
+ BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_NUM_COUNTERS
+};
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num);
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index);
+#endif /* __BNXT_RE_HW_STATS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index f0e01b3ac711..01eee15bbd65 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -223,50 +223,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
return 0;
}
-static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
-{
- struct ethtool_link_ksettings lksettings;
- u32 espeed;
-
- if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
- memset(&lksettings, 0, sizeof(lksettings));
- rtnl_lock();
- netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
- rtnl_unlock();
- espeed = lksettings.base.speed;
- } else {
- espeed = SPEED_UNKNOWN;
- }
- switch (espeed) {
- case SPEED_1000:
- *speed = IB_SPEED_SDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_10000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_20000:
- *speed = IB_SPEED_DDR;
- *width = IB_WIDTH_4X;
- break;
- case SPEED_25000:
- *speed = IB_SPEED_EDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_40000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_4X;
- break;
- case SPEED_50000:
- break;
- default:
- *speed = IB_SPEED_SDR;
- *width = IB_WIDTH_1X;
- break;
- }
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -308,25 +264,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
* IB stack to avoid race in the NETDEV_UNREG path
*/
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
- &port_attr->active_width);
- return 0;
-}
-
-int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
- int port_modify_mask,
- struct ib_port_modify *port_modify)
-{
- switch (port_modify_mask) {
- case IB_PORT_SHUTDOWN:
- break;
- case IB_PORT_INIT_TYPE:
- break;
- case IB_PORT_RESET_QKEY_CNTR:
- break;
- default:
- break;
- }
+ if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
+ &port_attr->active_width))
+ return -EINVAL;
return 0;
}
@@ -846,6 +786,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
@@ -860,6 +801,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc;
}
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp);
if (rc) {
@@ -969,7 +911,6 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
if (!ah)
return NULL;
- memset(ah, 0, sizeof(*ah));
ah->rdev = rdev;
ah->qplib_ah.pd = &pd->qplib_pd;
@@ -1016,7 +957,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (!qp)
return NULL;
- memset(qp, 0, sizeof(*qp));
qp->rdev = rdev;
/* Initialize the shadow QP structure from the QP1 values */
@@ -1404,6 +1344,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
+
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p to flush list\n",
+ qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ }
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p out of flush list\n",
+ qp);
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ }
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
qp->qplib_qp.modify_flags |=
@@ -2333,6 +2288,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_dev *rdev = cq->rdev;
int rc;
+ struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2347,7 +2303,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
kfree(cq);
}
atomic_dec(&rdev->cq_count);
- rdev->nq.budget--;
+ nq->budget--;
return 0;
}
@@ -2361,6 +2317,8 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
struct bnxt_re_cq *cq = NULL;
int rc, entries;
int cqe = attr->cqe;
+ struct bnxt_qplib_nq *nq = NULL;
+ unsigned int nq_alloc_cnt;
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
@@ -2412,8 +2370,15 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
cq->qplib_cq.sghead = NULL;
cq->qplib_cq.nmap = 0;
}
+	/*
+	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
+	 * used to derive the NQ index.
+	 */
+ nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
+ nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
cq->qplib_cq.max_wqe = entries;
- cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
+ cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
+ cq->qplib_cq.nq = nq;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2423,7 +2388,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
cq->ib_cq.cqe = entries;
cq->cq_period = cq->qplib_cq.period;
- rdev->nq.budget++;
+ nq->budget++;
atomic_inc(&rdev->cq_count);
@@ -2921,6 +2886,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
sq->send_phantom = false;
}
}
+ if (ncqe < budget)
+ ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
+ cqe + ncqe,
+ budget - ncqe);
if (!ncqe)
break;
@@ -3410,7 +3379,7 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
&rdev->qplib_res.dpi_tbl,
&uctx->dpi);
if (rc)
- dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+ dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
/* Don't fail, continue*/
uctx->dpi.dbr = NULL;
}
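
Note: CQ creation now spreads completion notification across the NQs instead of funnelling everything through rdev->nq. The selection is a lock-free round robin over the num_msix - 1 NQs; the same idea in isolation (sketch, hypothetical helper name):

	/* Sketch of the round-robin NQ pick used in bnxt_re_create_cq() */
	static struct bnxt_qplib_nq *bnxt_re_pick_nq(struct bnxt_re_dev *rdev)
	{
		unsigned int cnt = atomic_inc_return(&rdev->nq_alloc_cnt);

		/* NQ i is driven by MSI-X vector i + 1; vector 0 serves the CREQ */
		return &rdev->nq[cnt % (rdev->num_msix - 1)];
	}
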
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index a0bb7e33d7ca..1df11ed272ea 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -141,9 +141,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
-int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
- int port_modify_mask,
- struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index ceae2d92fb08..82d1cbc27aee 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -64,13 +64,14 @@
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
+#include "hw_counters.h"
+
static char version[] =
BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ROCE_DRV_MODULE_VERSION);
/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
@@ -162,7 +163,7 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
- int rc = 0, num_msix_want = BNXT_RE_MIN_MSIX, num_msix_got;
+ int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
struct bnxt_en_dev *en_dev;
if (!rdev)
@@ -170,6 +171,8 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
en_dev = rdev->en_dev;
+ num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
+
rtnl_lock();
num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
rdev->msix_entries,
@@ -474,7 +477,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->modify_device = bnxt_re_modify_device;
ibdev->query_port = bnxt_re_query_port;
- ibdev->modify_port = bnxt_re_modify_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
@@ -513,6 +515,8 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
ibdev->dealloc_ucontext = bnxt_re_dealloc_ucontext;
ibdev->mmap = bnxt_re_mmap;
+ ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats;
+ ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats;
return ib_register_device(ibdev, NULL);
}
@@ -653,8 +657,12 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
- if (rdev->nq.hwq.max_elements)
- bnxt_qplib_disable_nq(&rdev->nq);
+ int i;
+
+ if (rdev->nq[0].hwq.max_elements) {
+ for (i = 1; i < rdev->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
+ }
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -662,31 +670,41 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc = 0, i;
bnxt_qplib_init_res(&rdev->qplib_res);
- if (rdev->msix_entries[BNXT_RE_NQ_IDX].vector <= 0)
- return -EINVAL;
+	for (i = 1; i < rdev->num_msix; i++) {
+ rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
+ i - 1, rdev->msix_entries[i].vector,
+ rdev->msix_entries[i].db_offset,
+ &bnxt_re_cqn_handler, NULL);
- rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq,
- rdev->msix_entries[BNXT_RE_NQ_IDX].vector,
- rdev->msix_entries[BNXT_RE_NQ_IDX].db_offset,
- &bnxt_re_cqn_handler,
- NULL);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to enable NQ with rc = 0x%x", rc);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return rc;
+}
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to enable NQ: %#x", rc);
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+{
+ int i;
- return rc;
+ for (i = 0; i < rdev->num_msix - 1; i++) {
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+ bnxt_qplib_free_nq(&rdev->nq[i]);
+ }
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
- if (rdev->nq.hwq.max_elements) {
- bnxt_re_net_ring_free(rdev, rdev->nq.ring_id, lock_wait);
- bnxt_qplib_free_nq(&rdev->nq);
- }
+ bnxt_re_free_nq_res(rdev, lock_wait);
+
if (rdev->qplib_res.dpi_tbl.max) {
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
@@ -700,7 +718,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc = 0, i;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -717,30 +735,42 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
&rdev->dpi_privileged,
rdev);
if (rc)
- goto fail;
+ goto dealloc_res;
- rdev->nq.hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
- BNXT_RE_MAX_SRQC_COUNT + 2;
- rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ memory: %#x", rc);
- goto fail;
- }
- rc = bnxt_re_net_ring_alloc
- (rdev, rdev->nq.hwq.pbl[PBL_LVL_0].pg_map_arr,
- rdev->nq.hwq.pbl[rdev->nq.hwq.level].pg_count,
- HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_NQE_MAX_CNT - 1,
- rdev->msix_entries[BNXT_RE_NQ_IDX].ring_idx,
- &rdev->nq.ring_id);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ ring: %#x", rc);
- goto free_nq;
+ for (i = 0; i < rdev->num_msix - 1; i++) {
+ rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
+ BNXT_RE_MAX_SRQC_COUNT + 2;
+ rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
+ if (rc) {
+			dev_err(rdev_to_dev(rdev),
+				"Failed to allocate NQ%d, rc = %#x", i, rc);
+ goto dealloc_dpi;
+ }
+ rc = bnxt_re_net_ring_alloc
+ (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
+ rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
+ HWRM_RING_ALLOC_CMPL,
+ BNXT_QPLIB_NQE_MAX_CNT - 1,
+ rdev->msix_entries[i + 1].ring_idx,
+ &rdev->nq[i].ring_id);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
+ goto free_nq;
+ }
}
return 0;
free_nq:
- bnxt_qplib_free_nq(&rdev->nq);
+ for (i = 0; i < rdev->num_msix - 1; i++)
+ bnxt_qplib_free_nq(&rdev->nq[i]);
+dealloc_dpi:
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl,
+ &rdev->dpi_privileged);
+dealloc_res:
+ bnxt_qplib_free_res(&rdev->qplib_res);
+
fail:
rdev->qplib_res.rcfw = NULL;
return rc;
@@ -835,6 +865,42 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock);
}
+static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid gid;
+ u16 gid_idx, index;
+ int rc = 0;
+
+ if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ return 0;
+
+ if (!sgid_tbl) {
+ dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
+ return -EINVAL;
+ }
+
+ for (index = 0; index < sgid_tbl->active; index++) {
+ gid_idx = sgid_tbl->hw_id[index];
+
+ if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+ continue;
+		/* Only the VLAN-enable setting of non-VLAN GIDs needs to be
+		 * modified here; VLAN GIDs get it set when the GID is added.
+		 */
+ if (sgid_tbl->vlan[index])
+ continue;
+
+ memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
+
+ rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
+ rdev->qplib_res.netdev->dev_addr);
+ }
+
+ return rc;
+}
+
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
u32 prio_map = 0, tmp_map = 0;
@@ -854,8 +920,6 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
tmp_map = dcb_ieee_getapp_mask(netdev, &app);
prio_map |= tmp_map;
- if (!prio_map)
- prio_map = -EFAULT;
return prio_map;
}
@@ -881,10 +945,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
int rc;
/* Get priority for roce */
- rc = bnxt_re_get_priority_mask(rdev);
- if (rc < 0)
- return rc;
- prio_map = (u8)rc;
+ prio_map = bnxt_re_get_priority_mask(rdev);
if (prio_map == rdev->cur_prio_map)
return 0;
@@ -906,6 +967,16 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return rc;
}
+	/* Actual priorities are not programmed here, as that is already
+	 * done by the L2 driver; just enable or disable priority VLAN
+	 * tagging.
+	 */
+ if ((prio_map == 0 && rdev->qplib_res.prio) ||
+ (prio_map != 0 && !rdev->qplib_res.prio)) {
+ rdev->qplib_res.prio = prio_map ? true : false;
+
+ bnxt_re_update_gid(rdev);
+ }
+
return 0;
}
@@ -998,7 +1069,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
- rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw);
+ rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ BNXT_RE_MAX_QPC_COUNT);
if (rc)
goto fail;
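
Note: bnxt_re_setup_qos() above now only toggles priority VLAN tagging; the priorities themselves stay with the L2 driver. The GID refresh fires solely on a zero/non-zero transition of the priority map, which condenses to (illustrative sketch; function name is hypothetical):

	/* Sketch: refresh GIDs only when the prio map crosses zero */
	static void example_apply_prio(struct bnxt_re_dev *rdev, u8 prio_map)
	{
		bool prio = prio_map != 0;

		if (prio != rdev->qplib_res.prio) {
			rdev->qplib_res.prio = prio;
			bnxt_re_update_gid(rdev);
		}
	}
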
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 9af1514e5944..e8afc47f8949 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -51,6 +51,168 @@
#include "qplib_fp.h"
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+{
+ qp->sq.condition = false;
+ qp->sq.send_phantom = false;
+ qp->sq.single = false;
+}
+
+/* Flush list */
+static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (!qp->sq.flushed) {
+ dev_dbg(&scq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to SQ Flush list = %p",
+ qp);
+ bnxt_qplib_cancel_phantom_processing(qp);
+ list_add_tail(&qp->sq_flush, &scq->sqf_head);
+ qp->sq.flushed = true;
+ }
+ if (!qp->srq) {
+ if (!qp->rq.flushed) {
+ dev_dbg(&rcq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to RQ Flush list = %p",
+ qp);
+ list_add_tail(&qp->rq_flush, &rcq->rqf_head);
+ qp->rq.flushed = true;
+ }
+ }
+}
+
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+{
+ spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+ if (qp->scq == qp->rcq)
+ __acquire(&qp->rcq->hwq.lock);
+ else
+ spin_lock(&qp->rcq->hwq.lock);
+}
+
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+{
+ if (qp->scq == qp->rcq)
+ __release(&qp->rcq->hwq.lock);
+ else
+ spin_unlock(&qp->rcq->hwq.lock);
+ spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
+}
+
+static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ if (qp->scq == qp->rcq)
+ buddy_cq = NULL;
+ else if (qp->scq == cq)
+ buddy_cq = qp->rcq;
+ else
+ buddy_cq = qp->scq;
+ return buddy_cq;
+}
+
+static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __acquires(&buddy_cq->hwq.lock)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __acquire(&cq->hwq.lock);
+ else
+ spin_lock(&buddy_cq->hwq.lock);
+}
+
+static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __releases(&buddy_cq->hwq.lock)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __release(&cq->hwq.lock);
+ else
+ spin_unlock(&buddy_cq->hwq.lock);
+}
+
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
+}
+
+static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (qp->sq.flushed) {
+ qp->sq.flushed = false;
+ list_del(&qp->sq_flush);
+ }
+ if (!qp->srq) {
+ if (qp->rq.flushed) {
+ qp->rq.flushed = false;
+ list_del(&qp->rq_flush);
+ }
+ }
+}
+
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __clean_cq(qp->scq, (u64)(unsigned long)qp);
+ qp->sq.hwq.prod = 0;
+ qp->sq.hwq.cons = 0;
+ __clean_cq(qp->rcq, (u64)(unsigned long)qp);
+ qp->rq.hwq.prod = 0;
+ qp->rq.hwq.cons = 0;
+
+ __bnxt_qplib_del_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
+}
+
+static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
+{
+ struct bnxt_qplib_nq_work *nq_work =
+ container_of(work, struct bnxt_qplib_nq_work, work);
+
+ struct bnxt_qplib_cq *cq = nq_work->cq;
+ struct bnxt_qplib_nq *nq = nq_work->nq;
+
+ if (cq && nq) {
+ spin_lock_bh(&cq->compl_lock);
+ if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
+ dev_dbg(&nq->pdev->dev,
+				"%s: Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+ }
+ kfree(nq_work);
+}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
@@ -119,6 +281,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
struct nq_base *nqe, **nq_ptr;
+ struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
@@ -143,15 +306,17 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle = le32_to_cpu(nqcne->cq_handle_low);
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
<< 32;
- bnxt_qplib_arm_cq_enable((struct bnxt_qplib_cq *)
- ((unsigned long)q_handle));
- if (!nq->cqn_handler(nq, (struct bnxt_qplib_cq *)
- ((unsigned long)q_handle)))
+ cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
+ bnxt_qplib_arm_cq_enable(cq);
+ spin_lock_bh(&cq->compl_lock);
+ atomic_set(&cq->arm_state, 0);
+ if (!nq->cqn_handler(nq, (cq)))
num_cqne_processed++;
else
dev_warn(&nq->pdev->dev,
"QPLIB: cqn - type 0x%x not handled",
type);
+ spin_unlock_bh(&cq->compl_lock);
break;
}
case NQ_BASE_TYPE_DBQ_EVENT:
@@ -190,12 +355,17 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
+ if (nq->cqn_wq) {
+ destroy_workqueue(nq->cqn_wq);
+ nq->cqn_wq = NULL;
+ }
/* Make sure the HW is stopped! */
synchronize_irq(nq->vector);
tasklet_disable(&nq->worker);
tasklet_kill(&nq->worker);
if (nq->requested) {
+ irq_set_affinity_hint(nq->vector, NULL);
free_irq(nq->vector, nq);
nq->requested = false;
}
@@ -209,14 +379,14 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
- int msix_vector, int bar_reg_offset,
+ int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
void *, u8 event))
{
resource_size_t nq_base;
- int rc;
+ int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
@@ -227,14 +397,31 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
+ /* Have a task to schedule CQ notifiers in post send case */
+ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
+ if (!nq->cqn_wq)
+ goto fail;
+
nq->requested = false;
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
+ memset(nq->name, 0, 32);
+ sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
+ rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
bnxt_qplib_disable_nq(nq);
goto fail;
}
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_idx, &nq->mask);
+ rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ if (rc) {
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ nq->vector, nq_idx);
+ }
+
nq->requested = true;
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
nq->bar_reg_off = bar_reg_offset;
@@ -258,8 +445,10 @@ fail:
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
- if (nq->hwq.max_elements)
+ if (nq->hwq.max_elements) {
bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
+ nq->hwq.max_elements = 0;
+ }
}
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
@@ -401,8 +590,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- sq->flush_in_progress = false;
- rq->flush_in_progress = false;
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
return 0;
@@ -615,8 +804,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- sq->flush_in_progress = false;
- rq->flush_in_progress = false;
+ INIT_LIST_HEAD(&qp->sq_flush);
+ INIT_LIST_HEAD(&qp->rq_flush);
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
return 0;
@@ -963,13 +1154,19 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u16 cmd_flags = 0;
int rc;
+ rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+ rcfw->qp_tbl[qp->id].qp_handle = NULL;
+
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req.qp_cid = cpu_to_le32(qp->id);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
- if (rc)
+ if (rc) {
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = qp;
return rc;
+ }
/* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1074,14 +1271,21 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swq *swq;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct sq_sge *hw_sge;
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ bool sch_handler = false;
u32 sw_prod;
u8 wqe_size16;
int i, rc = 0, data_len = 0, pkt_num = 0;
__le32 temp32;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
- rc = -EINVAL;
- goto done;
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&sq->hwq.pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n",
+ __func__);
+ goto queue_err;
+ }
}
if (bnxt_qplib_queue_full(sq)) {
@@ -1301,12 +1505,35 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
SQ_PSN_SEARCH_NEXT_PSN_MASK));
}
-
+queue_err:
+ if (sch_handler) {
+ /* Store the ULP info in the software structures */
+ sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
+ swq = &sq->swq[sw_prod];
+ swq->wr_id = wqe->wr_id;
+ swq->type = wqe->type;
+ swq->flags = wqe->flags;
+ if (qp->sig_type)
+ swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
+ swq->start_psn = sq->psn & BTH_PSN_MASK;
+ }
sq->hwq.prod++;
-
qp->wqe_cnt++;
done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->scq;
+ nq_work->nq = qp->scq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&sq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate SQ nq_work!");
+ rc = -ENOMEM;
+ }
+ }
return rc;
}
@@ -1334,15 +1561,17 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_q *rq = &qp->rq;
struct rq_wqe *rqe, **rqe_ptr;
struct sq_sge *hw_sge;
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ bool sch_handler = false;
u32 sw_prod;
int i, rc = 0;
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) is in the 0x%x state",
- qp->id, qp->state);
- rc = -EINVAL;
- goto done;
+ sch_handler = true;
+ dev_dbg(&rq->hwq.pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n",
+ __func__);
+ goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
@@ -1378,7 +1607,27 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
/* Supply the rqe->wr_id index to the wr_id_tbl for now */
rqe->wr_id[0] = cpu_to_le32(sw_prod);
+queue_err:
+ if (sch_handler) {
+ /* Store the ULP info in the software structures */
+ sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
+ rq->swq[sw_prod].wr_id = wqe->wr_id;
+ }
+
rq->hwq.prod++;
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->rcq;
+ nq_work->nq = qp->rcq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&rq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate RQ nq_work!");
+ rc = -ENOMEM;
+ }
+ }
done:
return rc;
}
@@ -1471,6 +1720,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
+ INIT_LIST_HEAD(&cq->sqf_head);
+ INIT_LIST_HEAD(&cq->rqf_head);
+ spin_lock_init(&cq->compl_lock);
bnxt_qplib_arm_cq_enable(cq);
return 0;
@@ -1513,9 +1765,13 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
while (*budget) {
sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
if (sw_cons == sw_prod) {
- sq->flush_in_progress = false;
break;
}
+ /* Skip the FENCE WQE completions */
+ if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
+ bnxt_qplib_cancel_phantom_processing(qp);
+ goto skip_compl;
+ }
memset(cqe, 0, sizeof(*cqe));
cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
@@ -1525,6 +1781,7 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
cqe->type = sq->swq[sw_cons].type;
cqe++;
(*budget)--;
+skip_compl:
sq->hwq.cons++;
}
*pcqe = cqe;
@@ -1536,11 +1793,24 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
}
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
- int opcode, struct bnxt_qplib_cqe **pcqe, int *budget)
+ struct bnxt_qplib_cqe **pcqe, int *budget)
{
struct bnxt_qplib_cqe *cqe;
u32 sw_prod, sw_cons;
int rc = 0;
+ int opcode = 0;
+
+ switch (qp->type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI:
+ opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
+ break;
+ case CMDQ_CREATE_QP_TYPE_RC:
+ opcode = CQ_BASE_CQE_TYPE_RES_RC;
+ break;
+ case CMDQ_CREATE_QP_TYPE_UD:
+ opcode = CQ_BASE_CQE_TYPE_RES_UD;
+ break;
+ }
/* Flush the rest of the RQ */
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
@@ -1567,6 +1837,21 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return rc;
}
+void bnxt_qplib_mark_qp_error(void *qp_handle)
+{
+ struct bnxt_qplib_qp *qp = qp_handle;
+
+ if (!qp)
+ return;
+
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ bnxt_qplib_cancel_phantom_processing(qp);
+
+ /* Add qp to flush list of the CQ */
+ __bnxt_qplib_add_flush_qp(qp);
+}
+
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
* CQE is track from sw_cq_cons to max_element but valid only if VALID=1
*/
@@ -1694,10 +1979,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
- /* If we were in the middle of flushing the SQ, continue */
- if (sq->flush_in_progress)
- goto flush;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -1733,11 +2020,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
- sq->flush_in_progress = true;
- /* Must block new posting of SQ and RQ */
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- sq->condition = false;
- sq->single = false;
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
} else {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
@@ -1768,15 +2053,6 @@ out:
* the WC for this CQE
*/
sq->single = false;
- if (!sq->flush_in_progress)
- goto done;
-flush:
- /* Require to walk the sq's swq to fabricate CQEs for all
- * previously posted SWQEs due to the error CQE received
- */
- rc = __flush_sq(sq, qp, pcqe, budget);
- if (!rc)
- sq->flush_in_progress = false;
done:
return rc;
}
@@ -1798,6 +2074,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->length = le32_to_cpu(hwcqe->length);
@@ -1817,8 +2099,6 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1827,12 +2107,13 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RC, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+
+done:
return rc;
}
@@ -1853,6 +2134,11 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->length = le32_to_cpu(hwcqe->length);
@@ -1876,8 +2162,6 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1886,12 +2170,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_UD, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+done:
return rc;
}
@@ -1932,6 +2216,11 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: process_cq Raw/QP1 qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->flags = le16_to_cpu(hwcqe->flags);
@@ -1960,8 +2249,6 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1970,13 +2257,13 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RAWETH_QP1, pcqe,
- budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+
+done:
return rc;
}
@@ -1990,7 +2277,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe;
u32 sw_cons = 0, cqe_cons;
int rc = 0;
- u8 opcode = 0;
/* Check the Status */
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
@@ -2005,6 +2291,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
"QPLIB: FP: CQ Process terminal qp is NULL");
return -EINVAL;
}
+
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
@@ -2023,9 +2310,12 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons, sq->hwq.max_elements);
goto do_rq;
}
- /* If we were in the middle of flushing, continue */
- if (sq->flush_in_progress)
- goto flush_sq;
+
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto sq_done;
+ }
/* Terminal CQE can also include aggregated successful CQEs prior.
* So we must complete all CQEs from the current sq's cons to the
@@ -2055,11 +2345,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
rc = -EAGAIN;
goto sq_done;
}
- sq->flush_in_progress = true;
-flush_sq:
- rc = __flush_sq(sq, qp, pcqe, budget);
- if (!rc)
- sq->flush_in_progress = false;
sq_done:
if (rc)
return rc;
@@ -2075,26 +2360,23 @@ do_rq:
cqe_cons, rq->hwq.max_elements);
goto done;
}
+
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ rc = 0;
+ goto done;
+ }
+
/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
* from the current rq->cons to the rq->prod regardless what the
* rq->cons the terminal CQE indicates
*/
- rq->flush_in_progress = true;
- switch (qp->type) {
- case CMDQ_CREATE_QP1_TYPE_GSI:
- opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
- break;
- case CMDQ_CREATE_QP_TYPE_RC:
- opcode = CQ_BASE_CQE_TYPE_RES_RC;
- break;
- case CMDQ_CREATE_QP_TYPE_UD:
- opcode = CQ_BASE_CQE_TYPE_RES_UD;
- break;
- }
- rc = __flush_rq(rq, qp, opcode, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
done:
return rc;
}
@@ -2115,6 +2397,33 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
return 0;
}
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes)
+{
+ struct bnxt_qplib_qp *qp = NULL;
+ u32 budget = num_cqes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing SQ QP= %p",
+ qp);
+ __flush_sq(&qp->sq, qp, &cqe, &budget);
+ }
+
+ list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing RQ QP= %p",
+ qp);
+ __flush_rq(&qp->rq, qp, &cqe, &budget);
+ }
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+
+ return num_cqes - budget;
+}
+
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
@@ -2205,6 +2514,7 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
spin_lock_irqsave(&cq->hwq.lock, flags);
if (arm_type)
bnxt_qplib_arm_cq(cq, arm_type);
-
+	/* Use cq->arm_state to track whether the CQ handler should be invoked */
+ atomic_set(&cq->arm_state, 1);
spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
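
Note: the per-CQ flush lists (sqf_head/rqf_head) replace the old per-queue flush_in_progress flags — an errored QP is parked on its CQs' lists and drained later from bnxt_qplib_process_flush_list(). Every list manipulation holds both CQ locks, send CQ first; the discipline condensed (sketch; sparse lock annotations omitted, function name hypothetical):

	/* Sketch of the two-CQ lock discipline around flush-list updates */
	static void example_add_to_flush(struct bnxt_qplib_qp *qp)
	{
		unsigned long flags;

		spin_lock_irqsave(&qp->scq->hwq.lock, flags);
		if (qp->scq != qp->rcq)
			spin_lock(&qp->rcq->hwq.lock);

		if (!qp->sq.flushed) {
			list_add_tail(&qp->sq_flush, &qp->scq->sqf_head);
			qp->sq.flushed = true;
		}
		if (!qp->srq && !qp->rq.flushed) {
			list_add_tail(&qp->rq_flush, &qp->rcq->rqf_head);
			qp->rq.flushed = true;
		}

		if (qp->scq != qp->rcq)
			spin_unlock(&qp->rcq->hwq.lock);
		spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
	}
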
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 19176e06c98a..8ead70ca1c1d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -220,19 +220,20 @@ struct bnxt_qplib_q {
u16 q_full_delta;
u16 max_sge;
u32 psn;
- bool flush_in_progress;
bool condition;
bool single;
bool send_phantom;
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
+ bool flushed;
};
struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
u64 qp_handle;
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
@@ -296,6 +297,8 @@ struct bnxt_qplib_qp {
dma_addr_t sq_hdr_buf_map;
void *rq_hdr_buf;
dma_addr_t rq_hdr_buf_map;
+ struct list_head sq_flush;
+ struct list_head rq_flush;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -351,6 +354,7 @@ struct bnxt_qplib_cq {
u16 period;
struct bnxt_qplib_hwq hwq;
u32 cnq_hw_ring_id;
+ struct bnxt_qplib_nq *nq;
bool resize_in_progress;
struct scatterlist *sghead;
u32 nmap;
@@ -360,6 +364,9 @@ struct bnxt_qplib_cq {
unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq;
+ struct list_head sqf_head, rqf_head;
+ atomic_t arm_state;
+ spinlock_t compl_lock; /* synch CQ handlers */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -400,6 +407,7 @@ struct bnxt_qplib_nq {
struct pci_dev *pdev;
int vector;
+ cpumask_t mask;
int budget;
bool requested;
struct tasklet_struct worker;
@@ -417,11 +425,19 @@ struct bnxt_qplib_nq {
(struct bnxt_qplib_nq *nq,
void *srq,
u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
+};
+
+struct bnxt_qplib_nq_work {
+ struct work_struct work;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_cq *cq;
};
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
- int msix_vector, int bar_reg_offset,
+ int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
@@ -453,4 +469,13 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes);
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 16e42754dbec..391bb7006e8f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -44,6 +44,9 @@
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
@@ -279,16 +282,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
+ struct creq_qp_error_notification *err_event;
struct bnxt_qplib_crsq *crsqe;
unsigned long flags;
+ struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0;
u16 cookie;
__le16 mcookie;
+ u32 qp_id;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ err_event = (struct creq_qp_error_notification *)qp_event;
+ qp_id = le32_to_cpu(err_event->xid);
+ qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
"QPLIB: Received QP error notification");
+ dev_dbg(&rcfw->pdev->dev,
+ "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ qp_id, err_event->req_err_state_reason,
+ err_event->res_err_state_reason);
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
break;
default:
/* Command Response */
@@ -507,6 +523,7 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
@@ -514,7 +531,8 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
}
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw)
+ struct bnxt_qplib_rcfw *rcfw,
+ int qp_tbl_sz)
{
rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
@@ -541,6 +559,12 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
if (!rcfw->crsqe_tbl)
goto fail;
+ rcfw->qp_tbl_size = qp_tbl_sz;
+ rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ goto fail;
+
return 0;
fail:
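
Note: qp_tbl maps a firmware XID straight back to the owning QP, so the async error path needs no search. Condensed (sketch; the NULL check is an added safety assumption, not part of the patch):

	static void example_qp_error(struct bnxt_qplib_rcfw *rcfw,
				     struct creq_qp_error_notification *ev)
	{
		u32 qp_id = le32_to_cpu(ev->xid);
		struct bnxt_qplib_qp *qp = rcfw->qp_tbl[qp_id].qp_handle;
		unsigned long flags;

		if (!qp)	/* already destroyed */
			return;

		bnxt_qplib_acquire_cq_locks(qp, &flags);
		bnxt_qplib_mark_qp_error(qp);	/* blocks new posts, queues flush */
		bnxt_qplib_release_cq_locks(qp, &flags);
	}
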
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 09ce121770cd..0ed312f17c8d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -148,6 +148,11 @@ struct bnxt_qplib_rcfw_sbuf {
u32 size;
};
+struct bnxt_qplib_qp_node {
+ u32 qp_id; /* QP id */
+ void *qp_handle; /* ptr to qplib_qp */
+};
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
@@ -181,11 +186,13 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq *crsqe_tbl;
+ int qp_tbl_size;
+ struct bnxt_qplib_qp_node *qp_tbl;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw);
+ struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
@@ -207,4 +214,5 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
+void bnxt_qplib_mark_qp_error(void *qp_handle);
#endif /* __BNXT_QPLIB_RCFW_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 62447b3badec..4e101704e801 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -468,9 +468,11 @@ static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
kfree(sgid_tbl->tbl);
kfree(sgid_tbl->hw_id);
kfree(sgid_tbl->ctx);
+ kfree(sgid_tbl->vlan);
sgid_tbl->tbl = NULL;
sgid_tbl->hw_id = NULL;
sgid_tbl->ctx = NULL;
+ sgid_tbl->vlan = NULL;
sgid_tbl->max = 0;
sgid_tbl->active = 0;
}
@@ -491,8 +493,15 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
if (!sgid_tbl->ctx)
goto out_free2;
+ sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
+ if (!sgid_tbl->vlan)
+ goto out_free3;
+
sgid_tbl->max = max;
return 0;
+out_free3:
+ kfree(sgid_tbl->ctx);
+ sgid_tbl->ctx = NULL;
out_free2:
kfree(sgid_tbl->hw_id);
sgid_tbl->hw_id = NULL;
@@ -514,6 +523,7 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
}
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 2e4855509719..e87207526d2c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -116,6 +116,7 @@ struct bnxt_qplib_sgid_tbl {
u16 max;
u16 active;
void *ctx;
+ u8 *vlan;
};
struct bnxt_qplib_pkey_tbl {
@@ -188,6 +189,7 @@ struct bnxt_qplib_res {
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
+ bool prio;
};
#define to_bnxt_qplib(ptr, type, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index ef91ab786dd4..e277e54a05eb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,6 +213,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
@@ -265,28 +266,32 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct cmdq_add_gid req;
struct creq_add_gid_resp resp;
u16 cmd_flags = 0;
- u32 temp32[4];
- u16 temp16[3];
int rc;
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
- memcpy(temp32, gid->data, sizeof(struct bnxt_qplib_gid));
- req.gid[0] = cpu_to_be32(temp32[3]);
- req.gid[1] = cpu_to_be32(temp32[2]);
- req.gid[2] = cpu_to_be32(temp32[1]);
- req.gid[3] = cpu_to_be32(temp32[0]);
- if (vlan_id != 0xFFFF)
- req.vlan = cpu_to_le16((vlan_id &
- CMDQ_ADD_GID_VLAN_VLAN_ID_MASK) |
- CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
- CMDQ_ADD_GID_VLAN_VLAN_EN);
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+	/*
+	 * The driver must ensure that all RoCE traffic is VLAN tagged
+	 * whenever it runs on a non-zero VLAN ID or with a non-zero
+	 * priority.
+	 */
+ if ((vlan_id != 0xFFFF) || res->prio) {
+ if (vlan_id != 0xFFFF)
+ req.vlan = cpu_to_le16
+ (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
+ req.vlan |= cpu_to_le16
+ (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
/* MAC in network format */
- memcpy(temp16, smac, 6);
- req.src_mac[0] = cpu_to_be16(temp16[0]);
- req.src_mac[1] = cpu_to_be16(temp16[1]);
- req.src_mac[2] = cpu_to_be16(temp16[2]);
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
@@ -297,6 +302,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
sgid_tbl->active++;
+ if (vlan_id != 0xFFFF)
+ sgid_tbl->vlan[free_idx] = 1;
+
dev_dbg(&res->pdev->dev,
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
@@ -306,6 +314,43 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0;
}
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx,
+ u8 *smac)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_modify_gid_resp resp;
+ struct cmdq_modify_gid req;
+ int rc;
+ u16 cmd_flags = 0;
+
+ RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
+
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+ if (res->prio) {
+ req.vlan |= cpu_to_le16
+ (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
+
+ /* MAC in network format */
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+
+ req.gid_index = cpu_to_le16(gid_idx);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ return rc;
+}
+
/* pkeys */
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 2ce7e2a32cf0..11322582f5e4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -135,6 +135,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey);
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index fc23477ac52f..eeb55b2db57e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1473,8 +1473,8 @@ struct cmdq_modify_gid {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
- __le32 gid[4];
- __le16 src_mac[3];
+ __be32 gid[4];
+ __be16 src_mac[3];
__le16 vlan;
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
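
Note: the roce_hsi.h hunk is annotation-only — the gid and src_mac fields were already written with cpu_to_be32()/cpu_to_be16(), so declaring them __be32/__be16 lets sparse (make C=2) verify the byte order instead of warning about it. For example (illustrative sketch):

	/* With __be32 fields, a big-endian store type-checks cleanly */
	static void example_sparse_ok(struct cmdq_modify_gid *req, u32 host_word)
	{
		req->gid[0] = cpu_to_be32(host_word);	/* OK under sparse */
		/* req->gid[0] = cpu_to_le32(host_word); -- sparse would warn */
	}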