author    | Chuck Lever     | 2018-05-07 21:28:09 +0200
committer | J. Bruce Fields | 2018-05-11 21:48:57 +0200
commit    | 25fd86eca11c26bad2aede6dd4709ff58f89c7cb
tree      | 924d70a458777408f56a2194a87a6bb14f825644 /include/linux/sunrpc/svc_rdma.h
parent    | svcrdma: Introduce svc_rdma_send_ctxt
svcrdma: Don't overrun the SGE array in svc_rdma_send_ctxt
Receive buffers are always the same size, but each Send WR has a
variable number of SGEs, based on the contents of the xdr_buf being
sent.
While assembling a Send WR, keep track of the number of SGEs so that
we neither exceed the device's maximum nor walk off the end of the
Send SGE array.
For now, the Send path simply fails if a request would need more SGEs
than that maximum.
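As a rough illustration of that bookkeeping (a sketch only; the helper
name, its arguments, and the lkey handling are made up here, not taken
from the patch), each SGE added to the Send WR would be guarded by a
check against the per-transport limit:

	/* Illustrative only: map one buffer into the next Send SGE,
	 * refusing to go past the device's advertised limit.
	 */
	static int svc_rdma_add_send_sge(struct svcxprt_rdma *rdma,
					 struct svc_rdma_send_ctxt *ctxt,
					 u64 addr, u32 length)
	{
		/* Don't walk off the end of sc_sges[] or past the number
		 * of SGEs the device can handle in one Send WR.
		 */
		if (ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
			return -EIO;

		ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = addr;
		ctxt->sc_sges[ctxt->sc_cur_sge_no].length = length;
		ctxt->sc_sges[ctxt->sc_cur_sge_no].lkey =
			rdma->sc_pd->local_dma_lkey;
		ctxt->sc_cur_sge_no++;
		ctxt->sc_send_wr.num_sge = ctxt->sc_cur_sge_no;
		return 0;
	}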
The current logic in svc_rdma_accept bases the maximum number of
Send SGEs on the largest NFS request that can be sent or received.
In the transport layer, the limit is actually based on the
capabilities of the underlying device, not on properties of the
Upper Layer Protocol.
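Basing the limit on the device would look roughly like the following at
accept time (a sketch under assumptions: the exact ib_device_attr field
name depends on the kernel version and is not quoted from the patch):

	/* Sketch: take the Send SGE limit from the device's attributes
	 * rather than from any NFS message-size calculation.
	 */
	struct ib_device *dev = newxprt->sc_cm_id->device;

	newxprt->sc_max_send_sges = dev->attrs.max_send_sge;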
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'include/linux/sunrpc/svc_rdma.h')
-rw-r--r-- | include/linux/sunrpc/svc_rdma.h | 9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d3e2bb331264..bfb8824e31e1 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -96,7 +96,7 @@ struct svcxprt_rdma {
 	struct rdma_cm_id	*sc_cm_id;	/* RDMA connection id */
 	struct list_head	sc_accept_q;	/* Conn. waiting accept */
 	int			sc_ord;		/* RDMA read limit */
-	int			sc_max_sge;
+	int			sc_max_send_sges;
 	bool			sc_snd_w_inv;	/* OK to use Send With Invalidate */
 
 	atomic_t		sc_sq_avail;	/* SQEs ready to be consumed */
@@ -158,17 +158,14 @@ struct svc_rdma_recv_ctxt {
 	struct page		*rc_pages[RPCSVC_MAXPAGES];
 };
 
-enum {
-	RPCRDMA_MAX_SGES	= 1 + (RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE),
-};
-
 struct svc_rdma_send_ctxt {
 	struct list_head	sc_list;
 	struct ib_send_wr	sc_send_wr;
 	struct ib_cqe		sc_cqe;
 	int			sc_page_count;
+	int			sc_cur_sge_no;
 	struct page		*sc_pages[RPCSVC_MAXPAGES];
-	struct ib_sge		sc_sges[RPCRDMA_MAX_SGES];
+	struct ib_sge		sc_sges[];
 };
 
 /* svc_rdma_backchannel.c */
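With sc_sges[] now a flexible array member, the send-context allocation
has to size the trailing SGE storage from the per-device limit. A minimal
sketch of that allocation (the function body below is illustrative, not
the code added by this patch):

	static struct svc_rdma_send_ctxt *
	svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
	{
		struct svc_rdma_send_ctxt *ctxt;

		/* sc_sges[] is a flexible array member, so room for
		 * sc_max_send_sges entries is added to the allocation.
		 */
		ctxt = kmalloc(sizeof(*ctxt) +
			       rdma->sc_max_send_sges * sizeof(struct ib_sge),
			       GFP_KERNEL);
		if (!ctxt)
			return NULL;

		/* Point the Send WR at the trailing SGE array and start
		 * with no SGEs consumed.
		 */
		ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
		ctxt->sc_cur_sge_no = 0;
		return ctxt;
	}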