xprtrdma: Introduce helpers for allocating MWs
We eventually want to handle allocating MWs one at a time, as needed, instead of grabbing 64 and throwing them at each RPC in the pipeline.

Add a helper for grabbing an MW off rb_mws, and a helper for returning an MW to rb_mws. These will be used in a subsequent patch.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 89e0d11258
commit 346aa66b2a

2 changed files with 33 additions and 0 deletions
@@ -1173,6 +1173,37 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 	kfree(buf->rb_pool);
 }
 
+struct rpcrdma_mw *
+rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	struct rpcrdma_mw *mw = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&buf->rb_lock, flags);
+	if (!list_empty(&buf->rb_mws)) {
+		mw = list_first_entry(&buf->rb_mws,
+				      struct rpcrdma_mw, mw_list);
+		list_del_init(&mw->mw_list);
+	}
+	spin_unlock_irqrestore(&buf->rb_lock, flags);
+
+	if (!mw)
+		pr_err("RPC: %s: no MWs available\n", __func__);
+	return mw;
+}
+
+void
+rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+{
+	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&buf->rb_lock, flags);
+	list_add_tail(&mw->mw_list, &buf->rb_mws);
+	spin_unlock_irqrestore(&buf->rb_lock, flags);
+}
+
 /* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
  * some req segments uninitialized.
  */
@@ -413,6 +413,8 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 
+struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
+void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_req *);
 void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
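
For illustration only, here is a minimal sketch of how a caller might pair the two new helpers once MWs are handed out one at a time. The function name and the error handling below are assumptions for the sake of the example; they are not part of this patch, which only introduces rpcrdma_get_mw() and rpcrdma_put_mw().

/* Illustrative sketch (not from this patch): take one MW off rb_mws
 * for a registration and return it when the caller is done with it.
 * Assumes the xprtrdma internal definitions (struct rpcrdma_xprt,
 * struct rpcrdma_mw) from xprt_rdma.h are in scope.
 */
static int example_register_one(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mw *mw;

	mw = rpcrdma_get_mw(r_xprt);	/* grab an MW off rb_mws */
	if (!mw)
		return -ENOBUFS;	/* pool exhausted; caller must cope */

	/* ... use mw to post a memory registration ... */

	rpcrdma_put_mw(r_xprt, mw);	/* return the MW to rb_mws */
	return 0;
}

Note that both helpers take rb_lock with spin_lock_irqsave()/spin_unlock_irqrestore(), so they remain safe regardless of the context the later patches end up calling them from.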