xprtrdma: Split rb_lock
/proc/lock_stat showed contention between rpcrdma_buffer_get/put and the MR allocation functions during I/O intensive workloads.

Now that MRs are no longer allocated in rpcrdma_buffer_get(), there's no reason the rb_mws list has to be managed using the same lock as the send/receive buffers. Split that lock. The new lock does not need to disable interrupts because buffer get/put is never called in an interrupt context.

struct rpcrdma_buffer is re-arranged to ensure rb_mwlock and rb_mws are always in a different cacheline than rb_lock and the buffer pointers.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 7e53df111b
commit 58d1dcf5a8

4 changed files with 15 additions and 13 deletions
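The sketch below is not part of the patch; it is a minimal, hypothetical illustration of the locking split described in the commit message (the names demo_buffer, demo_mw, demo_get_mw and demo_put_mw are invented). The MW free list gets its own spinlock, and because the get/put paths only run in process context, plain spin_lock()/spin_unlock() are sufficient; the irqsave/irqrestore variants used with the old shared rb_lock are not needed.

/* Minimal, hypothetical sketch of the split-lock idea (not the actual patch). */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_mw {
	struct list_head	mw_list;
};

struct demo_buffer {
	/* MW free list and its dedicated lock: only taken in process context. */
	spinlock_t		mw_lock;
	struct list_head	mw_free;

	/* Send/receive buffer state, guarded separately by its own lock. */
	spinlock_t		buf_lock;
	int			send_index;
	int			recv_index;
};

/* Plain spin_lock() is enough here because this path is never entered
 * from interrupt context, so interrupts do not have to be disabled. */
static struct demo_mw *demo_get_mw(struct demo_buffer *buf)
{
	struct demo_mw *mw = NULL;

	spin_lock(&buf->mw_lock);
	if (!list_empty(&buf->mw_free)) {
		mw = list_first_entry(&buf->mw_free, struct demo_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->mw_lock);
	return mw;
}

static void demo_put_mw(struct demo_buffer *buf, struct demo_mw *mw)
{
	spin_lock(&buf->mw_lock);
	list_add_tail(&mw->mw_list, &buf->mw_free);
	spin_unlock(&buf->mw_lock);
}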
net/sunrpc/xprtrdma/fmr_ops.c
@@ -65,6 +65,7 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_mw *r;
 	int i, rc;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
net/sunrpc/xprtrdma/frwr_ops.c
@@ -266,6 +266,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
 	int i;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
net/sunrpc/xprtrdma/verbs.c
@@ -1173,15 +1173,14 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_mw *mw = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	if (!list_empty(&buf->rb_mws)) {
 		mw = list_first_entry(&buf->rb_mws,
 				      struct rpcrdma_mw, mw_list);
 		list_del_init(&mw->mw_list);
 	}
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 
 	if (!mw)
 		pr_err("RPC: %s: no MWs available\n", __func__);
@@ -1192,11 +1191,10 @@ void
 rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	list_add_tail(&mw->mw_list, &buf->rb_mws);
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 }
 
 static void
net/sunrpc/xprtrdma/xprt_rdma.h
@@ -282,15 +282,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-	spinlock_t	rb_lock;	/* protects indexes */
-	u32		rb_max_requests;/* client max requests */
-	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
-	struct list_head rb_all;
-	int		rb_send_index;
+	spinlock_t		rb_mwlock;	/* protect rb_mws list */
+	struct list_head	rb_mws;
+	struct list_head	rb_all;
+	char			*rb_pool;
+
+	spinlock_t		rb_lock;	/* protect buf arrays */
+	u32			rb_max_requests;
+	int			rb_send_index;
+	int			rb_recv_index;
 	struct rpcrdma_req	**rb_send_bufs;
-	int		rb_recv_index;
 	struct rpcrdma_rep	**rb_recv_bufs;
-	char		*rb_pool;
 };
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
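Whether the two field groups really land in different cachelines depends on the kernel configuration (spinlock_t grows with lock debugging enabled). One way to check a particular build is to print the offsets of the two locks; the helper below is a hypothetical debugging aid, not part of the patch, and assumes struct rpcrdma_buffer is visible via the xprt_rdma.h header.

/* Hypothetical layout check (not in the patch): dump the offsets of the
 * two lock groups relative to the cacheline size for this build. */
#include <linux/kernel.h>
#include <linux/cache.h>
#include "xprt_rdma.h"	/* for struct rpcrdma_buffer (assumed include path) */

static void rpcrdma_buffer_layout_check(void)
{
	pr_info("rb_mwlock at offset %zu, rb_lock at offset %zu, cacheline %d bytes\n",
		offsetof(struct rpcrdma_buffer, rb_mwlock),
		offsetof(struct rpcrdma_buffer, rb_lock),
		L1_CACHE_BYTES);
}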
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue