author     Kamal Heib                                2019-04-03 13:33:41 +0200
committer  Marcel Apfelbaum                          2019-05-04 14:55:56 +0200
commit     cdc84058bc6412179b22ce61f29db104cfcba9fc (patch)
tree       f9e04ea84921b9e5323a6262c5ce580af321ce89 /hw/rdma
parent     hw/rdma: Add SRQ support to backend layer (diff)
hw/rdma: Add support for managing SRQ resource
Add the required functions and definitions needed to support managing
shared receive queues (SRQs).
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Message-Id: <20190403113343.26384-3-kamalheib1@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Diffstat (limited to 'hw/rdma')
-rw-r--r--  hw/rdma/rdma_rm.c       | 93
-rw-r--r--  hw/rdma/rdma_rm.h       | 10
-rw-r--r--  hw/rdma/rdma_rm_defs.h  |  8
3 files changed, 111 insertions, 0 deletions
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index b683506b86..c4fb140dcd 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -544,6 +544,96 @@ void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
     rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
 }
 
+RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
+{
+    return rdma_res_tbl_get(&dev_res->srq_tbl, srq_handle);
+}
+
+int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+                      uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
+                      uint32_t *srq_handle, void *opaque)
+{
+    RdmaRmSRQ *srq;
+    RdmaRmPD *pd;
+    int rc;
+
+    pd = rdma_rm_get_pd(dev_res, pd_handle);
+    if (!pd) {
+        return -EINVAL;
+    }
+
+    srq = rdma_res_tbl_alloc(&dev_res->srq_tbl, srq_handle);
+    if (!srq) {
+        return -ENOMEM;
+    }
+
+    rc = rdma_backend_create_srq(&srq->backend_srq, &pd->backend_pd,
+                                 max_wr, max_sge, srq_limit);
+    if (rc) {
+        rc = -EIO;
+        goto out_dealloc_srq;
+    }
+
+    srq->opaque = opaque;
+
+    return 0;
+
+out_dealloc_srq:
+    rdma_res_tbl_dealloc(&dev_res->srq_tbl, *srq_handle);
+
+    return rc;
+}
+
+int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+                      struct ibv_srq_attr *srq_attr)
+{
+    RdmaRmSRQ *srq;
+
+    srq = rdma_rm_get_srq(dev_res, srq_handle);
+    if (!srq) {
+        return -EINVAL;
+    }
+
+    return rdma_backend_query_srq(&srq->backend_srq, srq_attr);
+}
+
+int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+                       struct ibv_srq_attr *srq_attr, int srq_attr_mask)
+{
+    RdmaRmSRQ *srq;
+
+    srq = rdma_rm_get_srq(dev_res, srq_handle);
+    if (!srq) {
+        return -EINVAL;
+    }
+
+    if ((srq_attr_mask & IBV_SRQ_LIMIT) &&
+        (srq_attr->srq_limit == 0)) {
+        return -EINVAL;
+    }
+
+    if ((srq_attr_mask & IBV_SRQ_MAX_WR) &&
+        (srq_attr->max_wr == 0)) {
+        return -EINVAL;
+    }
+
+    return rdma_backend_modify_srq(&srq->backend_srq, srq_attr,
+                                   srq_attr_mask);
+}
+
+void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
+{
+    RdmaRmSRQ *srq;
+
+    srq = rdma_rm_get_srq(dev_res, srq_handle);
+    if (!srq) {
+        return;
+    }
+
+    rdma_backend_destroy_srq(&srq->backend_srq, dev_res);
+    rdma_res_tbl_dealloc(&dev_res->srq_tbl, srq_handle);
+}
+
 void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
 {
     void **cqe_ctx;
@@ -673,6 +763,8 @@ int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr)
     res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                        dev_attr->max_qp_wr, sizeof(void *));
     res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));
+    res_tbl_init("SRQ", &dev_res->srq_tbl, dev_attr->max_srq,
+                 sizeof(RdmaRmSRQ));
 
     init_ports(dev_res);
 
@@ -691,6 +783,7 @@ void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
 
     fini_ports(dev_res, backend_dev, ifname);
 
+    res_tbl_free(&dev_res->srq_tbl);
     res_tbl_free(&dev_res->uc_tbl);
     res_tbl_free(&dev_res->cqe_ctx_tbl);
     res_tbl_free(&dev_res->qp_tbl);
diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h
index 4f03f9b8c5..e88ab95e26 100644
--- a/hw/rdma/rdma_rm.h
+++ b/hw/rdma/rdma_rm.h
@@ -65,6 +65,16 @@ int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      int attr_mask, struct ibv_qp_init_attr *init_attr);
 void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle);
 
+RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);
+int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
+                      uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
+                      uint32_t *srq_handle, void *opaque);
+int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+                      struct ibv_srq_attr *srq_attr);
+int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
+                       struct ibv_srq_attr *srq_attr, int srq_attr_mask);
+void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle);
+
 int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                           void *ctx);
 void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id);
diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h
index e774af5280..7bdd9f291f 100644
--- a/hw/rdma/rdma_rm_defs.h
+++ b/hw/rdma/rdma_rm_defs.h
@@ -33,6 +33,7 @@
 #define MAX_QP_RD_ATOM 16
 #define MAX_QP_INIT_RD_ATOM 16
 #define MAX_AH 64
+#define MAX_SRQ 512
 
 #define MAX_RM_TBL_NAME 16
 #define MAX_CONSEQ_EMPTY_POLL_CQ 4096 /* considered as error above this */
@@ -89,6 +90,12 @@ typedef struct RdmaRmQP {
     enum ibv_qp_state qp_state;
 } RdmaRmQP;
 
+typedef struct RdmaRmSRQ {
+    RdmaBackendSRQ backend_srq;
+    uint32_t recv_cq_handle;
+    void *opaque;
+} RdmaRmSRQ;
+
 typedef struct RdmaRmGid {
     union ibv_gid gid;
     int backend_gid_index;
@@ -129,6 +136,7 @@ struct RdmaDeviceResources {
     RdmaRmResTbl qp_tbl;
     RdmaRmResTbl cq_tbl;
    RdmaRmResTbl cqe_ctx_tbl;
+    RdmaRmResTbl srq_tbl;
     GHashTable *qp_hash; /* Keeps mapping between real and emulated */
     QemuMutex lock;
     RdmaRmStats stats;
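
For context, a minimal usage sketch (not part of the patch) of how a device
front end might drive the resource-manager SRQ API introduced above: allocate
an SRQ on an existing protection domain, query the attributes the backend
granted, arm the limit event, and tear the SRQ down. The srq_lifecycle_example
helper is hypothetical, and the dev_res, pd_handle and opaque values, as well
as the chosen queue sizes, are assumed to come from the caller's existing
device setup paths.

/*
 * Illustrative only -- not part of the commit.  Assumes it is built as part
 * of hw/rdma, where "rdma_rm.h" and the ibverbs types are available.
 */
#include "qemu/osdep.h"
#include "rdma_rm.h"

static int srq_lifecycle_example(RdmaDeviceResources *dev_res,
                                 uint32_t pd_handle, void *opaque)
{
    struct ibv_srq_attr attr = {};
    uint32_t srq_handle;
    int rc;

    /* Create an SRQ on the given PD: up to 64 receive WRs, 4 SGEs each */
    rc = rdma_rm_alloc_srq(dev_res, pd_handle, 64, 4, 0, &srq_handle, opaque);
    if (rc) {
        return rc;
    }

    /* Read back the attributes actually granted by the backend */
    rc = rdma_rm_query_srq(dev_res, srq_handle, &attr);
    if (rc) {
        goto out_dealloc;
    }

    /*
     * Arm the SRQ limit event at half of the granted depth (at least 1,
     * since rdma_rm_modify_srq rejects a zero limit with IBV_SRQ_LIMIT set).
     */
    attr.srq_limit = MAX(attr.max_wr / 2, 1);
    rc = rdma_rm_modify_srq(dev_res, srq_handle, &attr, IBV_SRQ_LIMIT);

out_dealloc:
    rdma_rm_dealloc_srq(dev_res, srq_handle);
    return rc;
}

Note that, per the diff, rdma_rm_alloc_srq already releases its table slot if
backend creation fails, so a caller only needs to undo allocations that
actually succeeded.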