summaryrefslogtreecommitdiffstats
path: root/net/sunrpc/xprtrdma/verbs.c
diff options
context:
space:
mode:
authorChuck Lever2015-05-26 17:53:13 +0200
committerAnna Schumaker2015-06-12 19:10:37 +0200
commit58d1dcf5a8ebb0ce8a521286a99efdd636012bf0 (patch)
tree8fcc6ca4d9a5f1234f9f73acdd0fa0bd20a5e06c /net/sunrpc/xprtrdma/verbs.c
parentxprtrdma: Remove rpcrdma_ia::ri_memreg_strategy (diff)
downloadkernel-qcow2-linux-58d1dcf5a8ebb0ce8a521286a99efdd636012bf0.tar.gz
kernel-qcow2-linux-58d1dcf5a8ebb0ce8a521286a99efdd636012bf0.tar.xz
kernel-qcow2-linux-58d1dcf5a8ebb0ce8a521286a99efdd636012bf0.zip
xprtrdma: Split rb_lock
/proc/lock_stat showed contention between rpcrdma_buffer_get/put and the MR allocation functions during I/O intensive workloads. Now that MRs are no longer allocated in rpcrdma_buffer_get(), there's no reason the rb_mws list has to be managed using the same lock as the send/receive buffers. Split that lock. The new lock does not need to disable interrupts because buffer get/put is never called in an interrupt context. struct rpcrdma_buffer is re-arranged to ensure rb_mwlock and rb_mws are always in a different cacheline than rb_lock and the buffer pointers. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Reviewed-by: Steve Wise <swise@opengridcomputing.com> Reviewed-by: Sagi Grimberg <sagig@mellanox.com> Tested-By: Devesh Sharma <devesh.sharma@avagotech.com> Reviewed-by: Doug Ledford <dledford@redhat.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc/xprtrdma/verbs.c')
-rw-r--r--net/sunrpc/xprtrdma/verbs.c10
1 file changed, 4 insertions, 6 deletions
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index cc1a52609974..234083560d0e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1173,15 +1173,14 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_mw *mw = NULL;
- unsigned long flags;
- spin_lock_irqsave(&buf->rb_lock, flags);
+ spin_lock(&buf->rb_mwlock);
if (!list_empty(&buf->rb_mws)) {
mw = list_first_entry(&buf->rb_mws,
struct rpcrdma_mw, mw_list);
list_del_init(&mw->mw_list);
}
- spin_unlock_irqrestore(&buf->rb_lock, flags);
+ spin_unlock(&buf->rb_mwlock);
if (!mw)
pr_err("RPC: %s: no MWs available\n", __func__);
@@ -1192,11 +1191,10 @@ void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- unsigned long flags;
- spin_lock_irqsave(&buf->rb_lock, flags);
+ spin_lock(&buf->rb_mwlock);
list_add_tail(&mw->mw_list, &buf->rb_mws);
- spin_unlock_irqrestore(&buf->rb_lock, flags);
+ spin_unlock(&buf->rb_mwlock);
}
static void