[08/12] SUNRPC: move task-dequeueing code into svc_recv()

Message ID: 20230731064839.7729-9-neilb@suse.de
State: New, archived
Series: SUNRPC: various thread management improvements

Commit Message

NeilBrown July 31, 2023, 6:48 a.m. UTC
svc_recv() has become rather small, and svc_rqst_wait_and_dequeue_work()
performs two different tasks.

So move the "dequeue" part out of svc_rqst_wait_and_dequeue_work()
into svc_recv().  This balances the code between the two functions.

svc_rqst_wait_and_dequeue_work() is now svc_rqst_wait_for_work() and
returns a bool indicating whether it actually waited.  This is used to
guide tracing and some statistics gathering.
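
In outline, svc_recv() after this change reads as follows (a condensed
sketch assembled from the patch itself; the thread_wait tuning inside
the if-block is elided here and shown in full in the diff):

void svc_recv(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;
	bool slept;

	if (!svc_alloc_arg(rqstp))
		return;

	/* wait-only helper: reports whether the thread actually slept */
	slept = svc_rqst_wait_for_work(rqstp);

	if (kthread_should_stop())
		return;

	clear_bit(SP_TASK_PENDING, &pool->sp_flags);

	/* the dequeue step, now done directly in svc_recv() */
	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt) {
		/* "slept" selects which tracepoint fires */
		if (slept)
			trace_svc_pool_awoken(rqstp);
		else
			trace_svc_pool_polled(rqstp);
		svc_handle_xprt(rqstp, rqstp->rq_xprt);
		return;
	}

	/* "slept" also drives the no-work statistic */
	if (slept)
		percpu_counter_inc(&pool->sp_threads_no_work);
}

The diff additionally sets rq_chandle.thread_wait (5 seconds normally,
1 second when the pool is congested) before handling the transport.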

Signed-off-by: NeilBrown <neilb@suse.de>
---
 net/sunrpc/svc_xprt.c | 67 +++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 35 deletions(-)

Patch

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 604c486c8576..45a76313b7e1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -722,14 +722,11 @@  rqst_should_sleep(struct svc_rqst *rqstp)
 	return true;
 }
 
-static void svc_rqst_wait_and_dequeue_work(struct svc_rqst *rqstp)
+static bool svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 {
-	struct svc_pool		*pool = rqstp->rq_pool;
+	struct svc_pool *pool = rqstp->rq_pool;
 	bool slept = false;
 
-	/* rq_xprt should be clear on entry */
-	WARN_ON_ONCE(rqstp->rq_xprt);
-
 	if (rqst_should_sleep(rqstp)) {
 		set_current_state(TASK_IDLE);
 		smp_mb__before_atomic();
@@ -749,31 +746,7 @@  static void svc_rqst_wait_and_dequeue_work(struct svc_rqst *rqstp)
 		smp_mb__after_atomic();
 	}
 	try_to_freeze();
-
-	if (kthread_should_stop())
-		return;
-
-	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-	rqstp->rq_xprt = svc_xprt_dequeue(pool);
-	if (rqstp->rq_xprt) {
-		if (slept)
-			trace_svc_pool_awoken(rqstp);
-		else
-			trace_svc_pool_polled(rqstp);
-		goto out_found;
-	}
-
-	if (slept)
-		percpu_counter_inc(&pool->sp_threads_no_work);
-	return;
-out_found:
-	/* Normally we will wait up to 5 seconds for any required
-	 * cache information to be provided.
-	 */
-	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
-		rqstp->rq_chandle.thread_wait = 5*HZ;
-	else
-		rqstp->rq_chandle.thread_wait = 1*HZ;
+	return slept;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -865,17 +838,41 @@  static void svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
  */
 void svc_recv(struct svc_rqst *rqstp)
 {
-	struct svc_xprt		*xprt = NULL;
+	struct svc_pool *pool = rqstp->rq_pool;
+	bool slept;
 
 	if (!svc_alloc_arg(rqstp))
 		return;
 
-	svc_rqst_wait_and_dequeue_work(rqstp);
+	slept = svc_rqst_wait_for_work(rqstp);
 
-	xprt = rqstp->rq_xprt;
-	if (xprt)
+	if (kthread_should_stop())
+		return;
+
+	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
+
+	rqstp->rq_xprt = svc_xprt_dequeue(pool);
+	if (rqstp->rq_xprt) {
+		struct svc_xprt *xprt = rqstp->rq_xprt;
+
+		if (slept)
+			trace_svc_pool_awoken(rqstp);
+		else
+			trace_svc_pool_polled(rqstp);
+
+		/* Normally we will wait up to 5 seconds for any required
+		 * cache information to be provided.
+		 */
+		if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+			rqstp->rq_chandle.thread_wait = 5 * HZ;
+		else
+			rqstp->rq_chandle.thread_wait = 1 * HZ;
 		svc_handle_xprt(rqstp, xprt);
-out:
+		return;
+	}
+
+	if (slept)
+		percpu_counter_inc(&pool->sp_threads_no_work);
 }
 EXPORT_SYMBOL_GPL(svc_recv);