@@ -90,10 +90,9 @@ struct svc_serv {
int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
@@ -57,6 +57,7 @@ struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -121,7 +122,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANEL */
@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
return NULL;
req->rq_xprt = xprt;
- INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
@@ -367,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
dprintk("RPC: add callback request to list\n");
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
}
@@ -438,8 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
static void
__svc_init_bc(struct svc_serv *serv)
{
- INIT_LIST_HEAD(&serv->sv_cb_list);
- spin_lock_init(&serv->sv_cb_lock);
+ lwq_init(&serv->sv_cb_list);
}
#else
static void
@@ -705,7 +705,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
if (svc_is_backchannel(rqstp)) {
- if (!list_empty(&rqstp->rq_server->sv_cb_list))
+ if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
return false;
}
#endif
@@ -880,18 +880,12 @@ void svc_recv(struct svc_rqst *rqstp)
struct svc_serv *serv = rqstp->rq_server;
struct rpc_rqst *req;
- spin_lock_bh(&serv->sv_cb_lock);
- req = list_first_entry_or_null(&serv->sv_cb_list,
- struct rpc_rqst, rq_bc_list);
+ req = lwq_dequeue(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
if (req) {
- list_del(&req->rq_bc_list);
- spin_unlock_bh(&serv->sv_cb_lock);
wake_next(rqstp);
-
svc_process_bc(req, rqstp);
- return;
}
- spin_unlock_bh(&serv->sv_cb_lock);
}
#endif
}
@@ -263,9 +263,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
This removes the need to store and update back-links in the list.
It also removes the need for the _bh version of spin_lock().

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/svc.h        |  3 +--
 include/linux/sunrpc/xprt.h       |  3 ++-
 net/sunrpc/backchannel_rqst.c     |  5 +----
 net/sunrpc/svc.c                  |  3 +--
 net/sunrpc/svc_xprt.c             | 12 +++---------
 net/sunrpc/xprtrdma/backchannel.c |  4 +---
 6 files changed, 9 insertions(+), 21 deletions(-)
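
For reference, below is a minimal sketch of the lwq usage pattern the patch
converts to. It is illustration only, not part of the patch: bc_item,
bc_queue and the bc_* helpers are invented names; only lwq_init(),
lwq_enqueue(), lwq_dequeue() and lwq_empty() come from <linux/lwq.h> as
used in the hunks above.

#include <linux/lwq.h>

/* One embedded lwq_node per queued object, taking the place of a
 * list_head plus an external spinlock.
 */
struct bc_item {
	struct lwq_node	node;
	int		payload;
};

static struct lwq bc_queue;

static void bc_queue_setup(void)
{
	/* One call replaces INIT_LIST_HEAD() + spin_lock_init() */
	lwq_init(&bc_queue);
}

static void bc_produce(struct bc_item *item)
{
	/* No external lock taken by the caller */
	lwq_enqueue(&item->node, &bc_queue);
}

static struct bc_item *bc_consume(void)
{
	/* Oldest queued item, or NULL when the queue is empty */
	return lwq_dequeue(&bc_queue, struct bc_item, node);
}

static bool bc_pending(void)
{
	return !lwq_empty(&bc_queue);
}

Because the producer side no longer takes a lock that a softirq-context
caller could contend on, the spin_lock_bh() pairs removed above are not
needed, which is what the changelog's note about the _bh variant refers to.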