| Message ID | 168966228864.11075.17318657609206358910.stgit@noble.brown (mailing list archive) |
| --- | --- |
| State | New, archived |
| Series | Refactor SUNRPC svc thread code, and use llist |
On Tue, Jul 18, 2023 at 04:38:08PM +1000, NeilBrown wrote:
> svc_get_next_xprt() does a lot more than get an xprt. It mostly waits.
>
> So rename to svc_wait_for_work() and don't bother returning a value.

Or svc_rqst_wait_for_work() ?

> The xprt can be found in ->rq_xprt.
>
> Also move all the code to handle ->rq_xprt into a single if branch, so
> that other handlers can be added there if other work is found.
>
> Remove the call to svc_xprt_dequeue() that is before we set TASK_IDLE.
> If there is still something to dequeue we will still get it after a few
> more checks - no sleeping. This was an unnecessary optimisation which
> muddles the code.

I think "This was an unnecessary optimisation" needs to be demonstrated,
and the removal needs to be a separate patch.

I would also move it before the patch adding trace_svc_pool_polled() so
we don't have a series with a patch that adds a tracepoint followed by
another patch that removes the same tracepoint.

> Drop a call to kthread_should_stop(). There are enough of those in
> svc_wait_for_work().

A bit of this clean-up could be moved back to 2/14.

> (This patch is best viewed with "-b")
>
> Signed-off-by: NeilBrown <neilb@suse.de>
> ---
>  net/sunrpc/svc_xprt.c | 70 +++++++++++++++++++------------------------------
>  1 file changed, 27 insertions(+), 43 deletions(-)
>
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index 44a33b1f542f..c7095ff7d5fd 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -735,19 +735,10 @@ rqst_should_sleep(struct svc_rqst *rqstp)
>  	return true;
>  }
>  
> -static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
> +static void svc_wait_for_work(struct svc_rqst *rqstp)
>  {
>  	struct svc_pool *pool = rqstp->rq_pool;
>  
> -	/* rq_xprt should be clear on entry */
> -	WARN_ON_ONCE(rqstp->rq_xprt);
> -
> -	rqstp->rq_xprt = svc_xprt_dequeue(pool);
> -	if (rqstp->rq_xprt) {
> -		trace_svc_pool_polled(rqstp);
> -		goto out_found;
> -	}
> -
>  	set_current_state(TASK_IDLE);
>  	smp_mb__before_atomic();
>  	clear_bit(SP_CONGESTED, &pool->sp_flags);
> @@ -769,10 +760,9 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
>  		goto out_found;
>  	}
>  
> -	if (kthread_should_stop())
> -		return NULL;
> -	percpu_counter_inc(&pool->sp_threads_no_work);
> -	return NULL;
> +	if (!kthread_should_stop())
> +		percpu_counter_inc(&pool->sp_threads_no_work);
> +	return;
>  out_found:
>  	/* Normally we will wait up to 5 seconds for any required
>  	 * cache information to be provided.
> @@ -781,7 +771,6 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
>  		rqstp->rq_chandle.thread_wait = 5*HZ;
>  	else
>  		rqstp->rq_chandle.thread_wait = 1*HZ;
> -	return rqstp->rq_xprt;
>  }
>  
>  static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
> @@ -855,45 +844,40 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
>   */
>  void svc_recv(struct svc_rqst *rqstp)
>  {
> -	struct svc_xprt *xprt = NULL;
> -	struct svc_serv *serv = rqstp->rq_server;
> -	int len;
> -
>  	if (!svc_alloc_arg(rqstp))
> -		goto out;
> +		return;
>  
>  	try_to_freeze();
>  	cond_resched();
> -	if (kthread_should_stop())
> -		goto out;
>  
> -	xprt = svc_get_next_xprt(rqstp);
> -	if (!xprt)
> -		goto out;
> +	svc_wait_for_work(rqstp);
>  
> -	len = svc_handle_xprt(rqstp, xprt);
> +	if (rqstp->rq_xprt) {
> +		struct svc_serv *serv = rqstp->rq_server;
> +		struct svc_xprt *xprt = rqstp->rq_xprt;
> +		int len;
>  
> -	/* No data, incomplete (TCP) read, or accept() */
> -	if (len <= 0)
> -		goto out_release;
> +		len = svc_handle_xprt(rqstp, xprt);
>  
> -	trace_svc_xdr_recvfrom(&rqstp->rq_arg);
> +		/* No data, incomplete (TCP) read, or accept() */
> +		if (len <= 0) {
> +			rqstp->rq_res.len = 0;
> +			svc_xprt_release(rqstp);
> +		} else {
>  
> -	clear_bit(XPT_OLD, &xprt->xpt_flags);
> +			trace_svc_xdr_recvfrom(&rqstp->rq_arg);
>  
> -	rqstp->rq_chandle.defer = svc_defer;
> +			clear_bit(XPT_OLD, &xprt->xpt_flags);
>  
> -	if (serv->sv_stats)
> -		serv->sv_stats->netcnt++;
> -	percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
> -	rqstp->rq_stime = ktime_get();
> -	svc_process(rqstp);
> -	return;
> -out_release:
> -	rqstp->rq_res.len = 0;
> -	svc_xprt_release(rqstp);
> -out:
> -	return;
> +			rqstp->rq_chandle.defer = svc_defer;
> +
> +			if (serv->sv_stats)
> +				serv->sv_stats->netcnt++;
> +			percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
> +			rqstp->rq_stime = ktime_get();
> +			svc_process(rqstp);
> +		}
> +	}
>  }
>  EXPORT_SYMBOL_GPL(svc_recv);
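For readers following the argument about the removed svc_xprt_dequeue() call, the pattern at issue looks roughly like the sketch below. This is a heavily trimmed illustration, not the real svc_wait_for_work(): congestion handling, rqst_should_sleep(), kthread_should_stop() and freezer handling are omitted, and svc_wait_sketch() is an invented name. It only makes concrete what the commit message asserts: because the thread publishes TASK_IDLE before re-checking the pool, newly queued work is caught either by that re-check or by the wakeup the enqueue path performs, so a poll placed before set_current_state() can only ever be an optimisation.

```c
/*
 * Trimmed sketch of the wait pattern under discussion (illustration only;
 * not the real svc_wait_for_work()).
 */
static void svc_wait_sketch(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	set_current_state(TASK_IDLE);		/* publish "about to sleep" first */

	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt) {
		/* Work was queued before or while we marked ourselves idle:
		 * the re-check sees it and we never sleep.
		 */
		__set_current_state(TASK_RUNNING);
		return;
	}

	schedule();				/* sleep; the enqueue side wakes us */
	__set_current_state(TASK_RUNNING);

	rqstp->rq_xprt = svc_xprt_dequeue(pool);/* pick up whatever woke us, if anything */
}
```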
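The other structural point in the patch, moving all ->rq_xprt handling into one branch of svc_recv(), is easier to see in outline. The sketch below is hypothetical and not part of the patch or the series: svc_handle_transport_work(), svc_handle_other_work() and rq_have_other_work are invented names, used only to show how a sibling branch for a different kind of work could be added later without touching the transport path.

```c
/* Hypothetical outline only; the names marked "invented" do not exist in the tree. */
void svc_recv_sketch(struct svc_rqst *rqstp)
{
	if (!svc_alloc_arg(rqstp))
		return;

	try_to_freeze();
	cond_resched();

	svc_wait_for_work(rqstp);

	if (rqstp->rq_xprt)
		svc_handle_transport_work(rqstp);	/* invented: the body of the "if" in the patch */

	if (rqstp->rq_have_other_work)			/* invented: some future source of work */
		svc_handle_other_work(rqstp);
}
```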