Diffstat (limited to 'hw/rdma/rdma_backend.c')
 hw/rdma/rdma_backend.c | 559 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 470 insertions(+), 89 deletions(-)
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index d7a4bbd91f..fd571f21e5 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -15,41 +15,57 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
#include "qapi/error.h"
+#include "qapi/qmp/qlist.h"
+#include "qapi/qmp/qnum.h"
+#include "qapi/qapi-events-rdma.h"
#include <infiniband/verbs.h>
+#include <infiniband/umad_types.h>
+#include <infiniband/umad.h>
+#include <rdma/rdma_user_cm.h>
+#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"
-/* Vendor Errors */
-#define VENDOR_ERR_FAIL_BACKEND 0x201
-#define VENDOR_ERR_TOO_MANY_SGES 0x202
-#define VENDOR_ERR_NOMEM 0x203
-#define VENDOR_ERR_QP0 0x204
-#define VENDOR_ERR_NO_SGE 0x205
-#define VENDOR_ERR_MAD_SEND 0x206
-#define VENDOR_ERR_INVLKEY 0x207
-#define VENDOR_ERR_MR_SMALL 0x208
-
#define THR_NAME_LEN 16
#define THR_POLL_TO 5000
+#define MAD_HDR_SIZE sizeof(struct ibv_grh)
+
typedef struct BackendCtx {
- uint64_t req_id;
void *up_ctx;
bool is_tx_req;
+ struct ibv_sge sge; /* Used to save MAD recv buffer */
} BackendCtx;
-static void (*comp_handler)(int status, unsigned int vendor_err, void *ctx);
+struct backend_umad {
+ struct ib_user_mad hdr;
+ char mad[RDMA_MAX_PRIVATE_DATA];
+};
+
+static void (*comp_handler)(void *ctx, struct ibv_wc *wc);
-static void dummy_comp_handler(int status, unsigned int vendor_err, void *ctx)
+static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
pr_err("No completion handler is registered\n");
}
+static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
+ void *ctx)
+{
+ struct ibv_wc wc = {0};
+
+ wc.status = status;
+ wc.vendor_err = vendor_err;
+
+ comp_handler(ctx, &wc);
+}
+
static void poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
int i, ne;
@@ -74,7 +90,7 @@ static void poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
}
pr_dbg("Processing %s CQE\n", bctx->is_tx_req ? "send" : "recv");
- comp_handler(wc[i].status, wc[i].vendor_err, bctx->up_ctx);
+ comp_handler(bctx->up_ctx, &wc[i]);
rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
g_free(bctx);
@@ -146,6 +162,77 @@ static void *comp_handler_thread(void *arg)
return NULL;
}
+static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
+{
+ atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
+}
+
+static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
+{
+ atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
+}
+
+static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
+{
+ return atomic_read(&backend_dev->rdmacm_mux.can_receive);
+}
+
+static int check_mux_op_status(CharBackend *mad_chr_be)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ pr_dbg("Reading response\n");
+ ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
+ if (ret != sizeof(msg)) {
+ pr_dbg("Invalid message size %d, expecting %ld\n", ret, sizeof(msg));
+ return -EIO;
+ }
+
+ pr_dbg("msg_type=%d\n", msg.hdr.msg_type);
+ pr_dbg("op_code=%d\n", msg.hdr.op_code);
+ pr_dbg("err_code=%d\n", msg.hdr.err_code);
+
+ if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
+ pr_dbg("Invalid message type %d\n", msg.hdr.msg_type);
+ return -EIO;
+ }
+
+ if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
+ pr_dbg("Operation failed in mux, error code %d\n", msg.hdr.err_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int exec_rdmacm_mux_req(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
+{
+ int rc = 0;
+
+ pr_dbg("Executing request %d\n", msg->hdr.op_code);
+
+ msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
+ disable_rdmacm_mux_async(backend_dev);
+ rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
+ (const uint8_t *)msg, sizeof(*msg));
+ if (rc != sizeof(*msg)) {
+ enable_rdmacm_mux_async(backend_dev);
+ pr_dbg("Fail to send request to rdmacm_mux (rc=%d)\n", rc);
+ return -EIO;
+ }
+
+ rc = check_mux_op_status(backend_dev->rdmacm_mux.chr_be);
+ if (rc) {
+ pr_dbg("Fail to execute rdmacm_mux request %d (rc=%d)\n",
+ msg->hdr.op_code, rc);
+ }
+
+ enable_rdmacm_mux_async(backend_dev);
+
+ return rc;
+}
+
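Every mux exchange is strictly synchronous: async delivery is masked via disable_rdmacm_mux_async() for the duration of the write, and the fixed-size response is read back before async delivery is re-enabled. A minimal caller sketch, using only names from this patch (it mirrors the GID-registration path added below):

    RdmaCmMuxMsg msg = {};
    int rc;

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;     /* or _UNREG / _MAD */
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    rc = exec_rdmacm_mux_req(backend_dev, &msg);  /* blocks until the mux replies */
    if (rc) {
        /* short write, malformed reply, or msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK */
    }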
static void stop_backend_thread(RdmaBackendThread *thread)
{
thread->run = false;
@@ -168,8 +255,8 @@ static void start_comp_thread(RdmaBackendDev *backend_dev)
comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}
-void rdma_backend_register_comp_handler(void (*handler)(int status,
- unsigned int vendor_err, void *ctx))
+void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
+ struct ibv_wc *wc))
{
comp_handler = handler;
}
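With this change the completion callback receives the whole ibv_wc rather than a pre-digested (status, vendor_err) pair, so consumers can also read byte_len and wc_flags (used for MAD receives below). A minimal sketch of a handler for the new signature; the body is illustrative, not the pvrdma implementation:

    static void my_comp_handler(void *ctx, struct ibv_wc *wc)
    {
        if (wc->status != IBV_WC_SUCCESS) {
            /* vendor_err carries the VENDOR_ERR_* codes set by complete_work() */
            pr_dbg("CQE error: status=%d vendor_err=0x%x\n",
                   wc->status, wc->vendor_err);
            return;
        }
        /* build a guest CQE for ctx from wc->byte_len, wc->wc_flags, ... */
    }

    /* e.g. from the device's realize path: */
    rdma_backend_register_comp_handler(my_comp_handler);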
@@ -286,11 +373,73 @@ static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
return 0;
}
+static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
+ union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
+{
+ RdmaCmMuxMsg msg = {};
+ char *hdr, *data;
+ int ret;
+
+ pr_dbg("num_sge=%d\n", num_sge);
+
+ if (num_sge != 2) {
+ return -EINVAL;
+ }
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
+ memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));
+
+ msg.umad_len = sge[0].length + sge[1].length;
+ pr_dbg("umad_len=%d\n", msg.umad_len);
+
+ if (msg.umad_len > sizeof(msg.umad.mad)) {
+ return -ENOMEM;
+ }
+
+ msg.umad.hdr.addr.qpn = htobe32(1);
+ msg.umad.hdr.addr.grh_present = 1;
+ pr_dbg("sgid_idx=%d\n", sgid_idx);
+ pr_dbg("sgid=0x%llx\n", sgid->global.interface_id);
+ msg.umad.hdr.addr.gid_index = sgid_idx;
+ memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
+ msg.umad.hdr.addr.hop_limit = 0xFF;
+
+ hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
+ if (!hdr) {
+ pr_dbg("Fail to map to sge[0]\n");
+ return -ENOMEM;
+ }
+ data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
+ if (!data) {
+ pr_dbg("Fail to map to sge[1]\n");
+ rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
+ return -ENOMEM;
+ }
+
+ pr_dbg_buf("mad_hdr", hdr, sge[0].length);
+ pr_dbg_buf("mad_data", data, sge[1].length);
+
+ memcpy(&msg.umad.mad[0], hdr, sge[0].length);
+ memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);
+
+ rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
+ rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
+
+ ret = exec_rdmacm_mux_req(backend_dev, &msg);
+ if (ret) {
+ pr_dbg("Fail to send MAD to rdma_umadmux (%d)\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
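mad_send() requires exactly two scatter/gather entries, header first and payload second, with a combined length of at most RDMA_MAX_PRIVATE_DATA so the copy into msg.umad.mad cannot overflow. A conforming-call sketch; the guest addresses and lengths are hypothetical:

    struct ibv_sge sge[2];
    int rc;

    sge[0].addr   = mad_hdr_addr;   /* hypothetical guest address of the MAD header */
    sge[0].length = mad_hdr_len;
    sge[1].addr   = mad_data_addr;  /* hypothetical guest address of the payload */
    sge[1].length = mad_data_len;   /* mad_hdr_len + mad_data_len <= RDMA_MAX_PRIVATE_DATA */

    rc = mad_send(backend_dev, sgid_idx, &sgid, sge, 2);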
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
RdmaBackendQP *qp, uint8_t qp_type,
struct ibv_sge *sge, uint32_t num_sge,
- union ibv_gid *dgid, uint32_t dqpn,
- uint32_t dqkey, void *ctx)
+ uint8_t sgid_idx, union ibv_gid *sgid,
+ union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
+ void *ctx)
{
BackendCtx *bctx;
struct ibv_sge new_sge[MAX_SGE];
@@ -301,21 +450,20 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
if (qp_type == IBV_QPT_SMI) {
pr_dbg("QP0 unsupported\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
} else if (qp_type == IBV_QPT_GSI) {
pr_dbg("QP1\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
+ rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
+ } else {
+ complete_work(IBV_WC_SUCCESS, 0, ctx);
+ }
}
- pr_dbg("qp->ibqp is NULL for qp_type %d!!!\n", qp_type);
return;
}
pr_dbg("num_sge=%d\n", num_sge);
- if (!num_sge) {
- pr_dbg("num_sge=0\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_NO_SGE, ctx);
- return;
- }
bctx = g_malloc0(sizeof(*bctx));
bctx->up_ctx = ctx;
@@ -324,20 +472,23 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
if (unlikely(rc)) {
pr_dbg("Failed to allocate cqe_ctx\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
goto out_free_bctx;
}
rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge);
if (rc) {
pr_dbg("Error: Failed to build host SGE array\n");
- comp_handler(IBV_WC_GENERAL_ERR, rc, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
goto out_dealloc_cqe_ctx;
}
if (qp_type == IBV_QPT_UD) {
- wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd,
- backend_dev->backend_gid_idx, dgid);
+ wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
+ if (!wr.wr.ud.ah) {
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ goto out_dealloc_cqe_ctx;
+ }
wr.wr.ud.remote_qpn = dqpn;
wr.wr.ud.remote_qkey = dqkey;
}
@@ -353,7 +504,7 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
if (rc) {
pr_dbg("Fail (%d, %d) to post send WQE to qpn %d\n", rc, errno,
qp->ibqp->qp_num);
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
goto out_dealloc_cqe_ctx;
}
@@ -366,6 +517,48 @@ out_free_bctx:
g_free(bctx);
}
+static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
+ struct ibv_sge *sge, uint32_t num_sge,
+ void *ctx)
+{
+ BackendCtx *bctx;
+ int rc;
+ uint32_t bctx_id;
+
+ if (num_sge != 1) {
+ pr_dbg("Invalid num_sge (%d), expecting 1\n", num_sge);
+ return VENDOR_ERR_INV_NUM_SGE;
+ }
+
+ if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
+ pr_dbg("Too small buffer for MAD\n");
+ return VENDOR_ERR_INV_MAD_BUFF;
+ }
+
+ pr_dbg("addr=0x%" PRIx64"\n", sge[0].addr);
+ pr_dbg("length=%d\n", sge[0].length);
+ pr_dbg("lkey=%d\n", sge[0].lkey);
+
+ bctx = g_malloc0(sizeof(*bctx));
+
+ rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
+ if (unlikely(rc)) {
+ g_free(bctx);
+ pr_dbg("Fail to allocate cqe_ctx\n");
+ return VENDOR_ERR_NOMEM;
+ }
+
+ pr_dbg("bctx_id %d, bctx %p, ctx %p\n", bctx_id, bctx, ctx);
+ bctx->up_ctx = ctx;
+ bctx->sge = *sge;
+
+ qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
+ qlist_append_int(backend_dev->recv_mads_list.list, bctx_id);
+ qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
+
+ return 0;
+}
+
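The length check above guarantees the posted buffer can hold a synthesized GRH plus the largest MAD the mux can deliver. When the MAD arrives, process_incoming_mad_req() fills the saved buffer as follows:

    /*
     * [0 .. MAD_HDR_SIZE)                 struct ibv_grh, written by build_mad_hdr()
     * [MAD_HDR_SIZE .. MAD_HDR_SIZE+len)  raw MAD copied from msg->umad.mad
     */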
void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
RdmaDeviceResources *rdma_dev_res,
RdmaBackendQP *qp, uint8_t qp_type,
@@ -380,21 +573,19 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
if (qp_type == IBV_QPT_SMI) {
pr_dbg("QP0 unsupported\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
}
if (qp_type == IBV_QPT_GSI) {
pr_dbg("QP1\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
+ rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
+ if (rc) {
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
+ }
}
return;
}
pr_dbg("num_sge=%d\n", num_sge);
- if (!num_sge) {
- pr_dbg("num_sge=0\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_NO_SGE, ctx);
- return;
- }
bctx = g_malloc0(sizeof(*bctx));
bctx->up_ctx = ctx;
@@ -403,14 +594,14 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
rc = rdma_rm_alloc_cqe_ctx(rdma_dev_res, &bctx_id, bctx);
if (unlikely(rc)) {
pr_dbg("Failed to allocate cqe_ctx\n");
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
goto out_free_bctx;
}
rc = build_host_sge_array(rdma_dev_res, new_sge, sge, num_sge);
if (rc) {
pr_dbg("Error: Failed to build host SGE array\n");
- comp_handler(IBV_WC_GENERAL_ERR, rc, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
goto out_dealloc_cqe_ctx;
}
@@ -422,7 +613,7 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
if (rc) {
pr_dbg("Fail (%d, %d) to post recv WQE to qpn %d\n", rc, errno,
qp->ibqp->qp_num);
- comp_handler(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
goto out_dealloc_cqe_ctx;
}
@@ -513,7 +704,6 @@ int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
switch (qp_type) {
case IBV_QPT_GSI:
- pr_dbg("QP1 unsupported\n");
return 0;
case IBV_QPT_RC:
@@ -594,9 +784,9 @@ int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
}
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
- uint8_t qp_type, union ibv_gid *dgid,
- uint32_t dqpn, uint32_t rq_psn, uint32_t qkey,
- bool use_qkey)
+ uint8_t qp_type, uint8_t sgid_idx,
+ union ibv_gid *dgid, uint32_t dqpn,
+ uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
struct ibv_qp_attr attr = {0};
union ibv_gid ibv_gid = {
@@ -608,13 +798,15 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
attr.qp_state = IBV_QPS_RTR;
attr_mask = IBV_QP_STATE;
+ qp->sgid_idx = sgid_idx;
+
switch (qp_type) {
case IBV_QPT_RC:
pr_dbg("dgid=0x%" PRIx64 ",%" PRIx64 "\n",
be64_to_cpu(ibv_gid.global.subnet_prefix),
be64_to_cpu(ibv_gid.global.interface_id));
pr_dbg("dqpn=0x%x\n", dqpn);
- pr_dbg("sgid_idx=%d\n", backend_dev->backend_gid_idx);
+ pr_dbg("sgid_idx=%d\n", qp->sgid_idx);
pr_dbg("sport_num=%d\n", backend_dev->port_num);
pr_dbg("rq_psn=0x%x\n", rq_psn);
@@ -626,7 +818,7 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
attr.ah_attr.is_global = 1;
attr.ah_attr.grh.hop_limit = 1;
attr.ah_attr.grh.dgid = ibv_gid;
- attr.ah_attr.grh.sgid_index = backend_dev->backend_gid_idx;
+ attr.ah_attr.grh.sgid_index = qp->sgid_idx;
attr.rq_psn = rq_psn;
attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
@@ -635,8 +827,8 @@ int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
break;
case IBV_QPT_UD:
+ pr_dbg("qkey=0x%x\n", qkey);
if (use_qkey) {
- pr_dbg("qkey=0x%x\n", qkey);
attr.qkey = qkey;
attr_mask |= IBV_QP_QKEY;
}
@@ -725,21 +917,231 @@ void rdma_backend_destroy_qp(RdmaBackendQP *qp)
static int init_device_caps(RdmaBackendDev *backend_dev,
struct ibv_device_attr *dev_attr)
{
- if (ibv_query_device(backend_dev->context, &backend_dev->dev_attr)) {
+ struct ibv_device_attr bk_dev_attr;
+
+ if (ibv_query_device(backend_dev->context, &bk_dev_attr)) {
+ return -EIO;
+ }
+
+ dev_attr->max_sge = MAX_SGE;
+
+ CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp_wr, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_cqe, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
+ CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
+
+ return 0;
+}
+
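Each CHK_ATTR line reconciles a guest-requested capability with what the host HCA reported in bk_dev_attr. The macro itself is defined outside this hunk; a simplified sketch of its intent, stated as an assumption rather than the verbatim definition:

    /* Assumed behavior of CHK_ATTR: clamp the requested value to the host's. */
    #define CHK_ATTR(req, dev, member, fmt)        \
        do {                                       \
            if ((req)->member > (dev).member) {    \
                (req)->member = (dev).member;      \
            }                                      \
        } while (0)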
+static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
+ union ibv_gid *my_gid, int paylen)
+{
+ grh->paylen = htons(paylen);
+ grh->sgid = *sgid;
+ grh->dgid = *my_gid;
+
+ pr_dbg("paylen=%d (net=0x%x)\n", paylen, grh->paylen);
+ pr_dbg("dgid=0x%llx\n", my_gid->global.interface_id);
+ pr_dbg("sgid=0x%llx\n", sgid->global.interface_id);
+}
+
+static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
+ RdmaCmMuxMsg *msg)
+{
+ QObject *o_ctx_id;
+ unsigned long cqe_ctx_id;
+ BackendCtx *bctx;
+ char *mad;
+
+ pr_dbg("umad_len=%d\n", msg->umad_len);
+
+#ifdef PVRDMA_DEBUG
+ struct umad_hdr *hdr = (struct umad_hdr *)&msg->umad.mad;
+ pr_dbg("bv %x cls %x cv %x mtd %x st %d tid %" PRIx64 " at %x atm %x\n",
+ hdr->base_version, hdr->mgmt_class, hdr->class_version,
+ hdr->method, hdr->status, be64toh(hdr->tid),
+ hdr->attr_id, hdr->attr_mod);
+#endif
+
+ qemu_mutex_lock(&backend_dev->recv_mads_list.lock);
+ o_ctx_id = qlist_pop(backend_dev->recv_mads_list.list);
+ qemu_mutex_unlock(&backend_dev->recv_mads_list.lock);
+ if (!o_ctx_id) {
+ pr_dbg("No more free MADs buffers, waiting for a while\n");
+ sleep(THR_POLL_TO);
+ return;
+ }
+
+ cqe_ctx_id = qnum_get_uint(qobject_to(QNum, o_ctx_id));
+ bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
+ if (unlikely(!bctx)) {
+ pr_dbg("Error: Fail to find ctx for %ld\n", cqe_ctx_id);
+ return;
+ }
+
+ pr_dbg("id %ld, bctx %p, ctx %p\n", cqe_ctx_id, bctx, bctx->up_ctx);
+
+ mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
+ bctx->sge.length);
+ if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
+ if (mad) { /* mapped but too small: unmap before reporting the error */
+ rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);
+ }
+ complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
+ bctx->up_ctx);
+ } else {
+ struct ibv_wc wc = {0};
+ pr_dbg_buf("mad", msg->umad.mad, msg->umad_len);
+ memset(mad, 0, bctx->sge.length);
+ build_mad_hdr((struct ibv_grh *)mad,
+ (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
+ msg->umad_len);
+ memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
+ rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);
+
+ wc.byte_len = msg->umad_len;
+ wc.status = IBV_WC_SUCCESS;
+ wc.wc_flags = IBV_WC_GRH;
+ comp_handler(bctx->up_ctx, &wc);
+ }
+
+ g_free(bctx);
+ rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
+}
+
+static inline int rdmacm_mux_can_receive(void *opaque)
+{
+ RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
+
+ return rdmacm_mux_can_process_async(backend_dev);
+}
+
+static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
+{
+ RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
+ RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;
+
+ pr_dbg("Got %d bytes\n", size);
+ pr_dbg("msg_type=%d\n", msg->hdr.msg_type);
+ pr_dbg("op_code=%d\n", msg->hdr.op_code);
+
+ if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
+ msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
+ pr_dbg("Error: Not a MAD request, skipping\n");
+ return;
+ }
+ process_incoming_mad_req(backend_dev, msg);
+}
+
+static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
+{
+ int ret;
+
+ backend_dev->rdmacm_mux.chr_be = mad_chr_be;
+
+ ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
+ if (!ret) {
+ pr_dbg("Missing chardev for MAD multiplexer\n");
+ return -EIO;
+ }
+
+ qemu_mutex_init(&backend_dev->recv_mads_list.lock);
+ backend_dev->recv_mads_list.list = qlist_new();
+
+ enable_rdmacm_mux_async(backend_dev);
+
+ qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
+ rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
+ NULL, backend_dev, NULL, true);
+
+ return 0;
+}
+
+static void mad_fini(RdmaBackendDev *backend_dev)
+{
+ pr_dbg("Stopping MAD\n");
+ disable_rdmacm_mux_async(backend_dev);
+ qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
+ if (backend_dev->recv_mads_list.list) {
+ qlist_destroy_obj(QOBJECT(backend_dev->recv_mads_list.list));
+ qemu_mutex_destroy(&backend_dev->recv_mads_list.lock);
+ }
+}
+
+int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
+ union ibv_gid *gid)
+{
+ union ibv_gid sgid;
+ int ret;
+ int i = 0;
+
+ pr_dbg("0x%llx, 0x%llx\n",
+ (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix),
+ (long long unsigned int)be64_to_cpu(gid->global.interface_id));
+
+ do {
+ ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
+ &sgid);
+ i++;
+ } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));
+
+ pr_dbg("gid_index=%d\n", i - 1);
+
+ return ret ? ret : i - 1;
+}
+
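The lookup is a linear scan of the host GID table via ibv_query_gid() until an entry matches; on a miss the final ibv_query_gid() failure is returned as-is. A usage sketch, assuming libibverbs reports that failure with a negative value:

    union ibv_gid gid;   /* GID the guest installed; contents hypothetical */
    int sgid_idx = rdma_backend_get_gid_index(backend_dev, &gid);
    if (sgid_idx < 0) {
        /* not found: the scan ran past the end of the host GID table */
    }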
+int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ pr_dbg("0x%llx, 0x%llx\n",
+ (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix),
+ (long long unsigned int)be64_to_cpu(gid->global.interface_id));
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
+ memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));
+
+ ret = exec_rdmacm_mux_req(backend_dev, &msg);
+ if (ret) {
+ pr_dbg("Fail to register GID to rdma_umadmux (%d)\n", ret);
return -EIO;
}
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_mr_size, "%" PRId64);
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_qp, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_sge, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_qp_wr, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_cq, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_cqe, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_mr, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_pd, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_qp_rd_atom, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_qp_init_rd_atom, "%d");
- CHK_ATTR(dev_attr, backend_dev->dev_attr, max_ah, "%d");
+ qapi_event_send_rdma_gid_status_changed(ifname, true,
+ gid->global.subnet_prefix,
+ gid->global.interface_id);
+
+ return ret;
+}
+
+int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
+ union ibv_gid *gid)
+{
+ RdmaCmMuxMsg msg = {};
+ int ret;
+
+ pr_dbg("0x%llx, 0x%llx\n",
+ (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix),
+ (long long unsigned int)be64_to_cpu(gid->global.interface_id));
+
+ msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
+ memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));
+
+ ret = exec_rdmacm_mux_req(backend_dev, &msg);
+ if (ret) {
+ pr_dbg("Fail to unregister GID from rdma_umadmux (%d)\n", ret);
+ return -EIO;
+ }
+
+ qapi_event_send_rdma_gid_status_changed(ifname, false,
+ gid->global.subnet_prefix,
+ gid->global.interface_id);
return 0;
}
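Both registration and unregistration emit the new QAPI event so a management layer can track which host GIDs the guest is using. A hedged sketch of the resulting QMP event; field names follow the RDMA_GID_STATUS_CHANGED definition added in this series, and all values are illustrative:

    { "event": "RDMA_GID_STATUS_CHANGED",
      "timestamp": { "seconds": 1545152274, "microseconds": 551499 },
      "data": { "netdev": "bridge0", "gid-status": true,
                "subnet-prefix": 33022, "interface-id": 3937719950031106 } }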
@@ -747,20 +1149,17 @@ static int init_device_caps(RdmaBackendDev *backend_dev,
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
RdmaDeviceResources *rdma_dev_res,
const char *backend_device_name, uint8_t port_num,
- uint8_t backend_gid_idx, struct ibv_device_attr *dev_attr,
+ struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be,
Error **errp)
{
int i;
int ret = 0;
int num_ibv_devices;
struct ibv_device **dev_list;
- struct ibv_port_attr port_attr;
memset(backend_dev, 0, sizeof(*backend_dev));
backend_dev->dev = pdev;
-
- backend_dev->backend_gid_idx = backend_gid_idx;
backend_dev->port_num = port_num;
backend_dev->rdma_dev_res = rdma_dev_res;
@@ -797,9 +1196,9 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
backend_dev->ib_dev = *dev_list;
}
- pr_dbg("Using backend device %s, port %d, gid_idx %d\n",
- ibv_get_device_name(backend_dev->ib_dev),
- backend_dev->port_num, backend_dev->backend_gid_idx);
+ pr_dbg("Using backend device %s, port %d\n",
+ ibv_get_device_name(backend_dev->ib_dev), backend_dev->port_num);
+ pr_dbg("uverb device %s\n", backend_dev->ib_dev->dev_name);
backend_dev->context = ibv_open_device(backend_dev->ib_dev);
if (!backend_dev->context) {
@@ -816,20 +1215,6 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
}
pr_dbg("dev->backend_dev.channel=%p\n", backend_dev->channel);
- ret = ibv_query_port(backend_dev->context, backend_dev->port_num,
- &port_attr);
- if (ret) {
- error_setg(errp, "Error %d from ibv_query_port", ret);
- ret = -EIO;
- goto out_destroy_comm_channel;
- }
-
- if (backend_dev->backend_gid_idx >= port_attr.gid_tbl_len) {
- error_setg(errp, "Invalid backend_gid_idx, should be less than %d",
- port_attr.gid_tbl_len);
- goto out_destroy_comm_channel;
- }
-
ret = init_device_caps(backend_dev, dev_attr);
if (ret) {
error_setg(errp, "Failed to initialize device capabilities");
@@ -837,18 +1222,13 @@ int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
goto out_destroy_comm_channel;
}
- ret = ibv_query_gid(backend_dev->context, backend_dev->port_num,
- backend_dev->backend_gid_idx, &backend_dev->gid);
+
+ ret = mad_init(backend_dev, mad_chr_be);
if (ret) {
- error_setg(errp, "Failed to query gid %d",
- backend_dev->backend_gid_idx);
+ error_setg(errp, "Fail to initialize mad");
ret = -EIO;
goto out_destroy_comm_channel;
}
- pr_dbg("subnet_prefix=0x%" PRIx64 "\n",
- be64_to_cpu(backend_dev->gid.global.subnet_prefix));
- pr_dbg("interface_id=0x%" PRIx64 "\n",
- be64_to_cpu(backend_dev->gid.global.interface_id));
backend_dev->comp_thread.run = false;
backend_dev->comp_thread.is_running = false;
@@ -886,6 +1266,7 @@ void rdma_backend_stop(RdmaBackendDev *backend_dev)
void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
rdma_backend_stop(backend_dev);
+ mad_fini(backend_dev);
g_hash_table_destroy(ah_hash);
ibv_destroy_comp_channel(backend_dev->channel);
ibv_close_device(backend_dev->context);