[12/12] SUNRPC: discard SP_CONGESTED

Message ID: 20230731064839.7729-13-neilb@suse.de
State: New, archived
Series: SUNRPC: various thread management improvements

Commit Message

NeilBrown July 31, 2023, 6:48 a.m. UTC
We can tell if a pool is congested by checking if the idle list is
empty.  We don't need a separate flag.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/svc.h    | 1 -
 include/trace/events/sunrpc.h | 2 --
 net/sunrpc/svc.c              | 1 -
 net/sunrpc/svc_xprt.c         | 4 +---
 4 files changed, 1 insertion(+), 7 deletions(-)
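
As context for the patch below, here is a minimal user-space sketch (not the kernel code itself) of the idea in the commit message: a pool is congested exactly when its idle-thread list is empty, so the condition can be read straight from the list instead of being mirrored in a separate SP_CONGESTED bit. The struct and function names here (pool, idle_thread, pool_congested) are illustrative stand-ins only.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for struct svc_pool and its sp_idle_threads list. */
struct idle_thread {
	struct idle_thread *next;
};

struct pool {
	struct idle_thread *idle_threads;	/* NULL <=> no thread is idle */
};

/*
 * Stand-in for the old test_bit(SP_CONGESTED, &pool->sp_flags) check:
 * with this patch, "congested" is simply "the idle list is empty",
 * i.e. list_empty(&pool->sp_idle_threads) in the real code.
 */
static bool pool_congested(const struct pool *p)
{
	return p->idle_threads == NULL;
}

int main(void)
{
	struct idle_thread t = { .next = NULL };
	struct pool p = { .idle_threads = NULL };

	printf("congested: %d\n", pool_congested(&p));	/* 1: no idle threads */
	p.idle_threads = &t;				/* a thread goes idle */
	printf("congested: %d\n", pool_congested(&p));	/* 0 */
	return 0;
}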

Patch

diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index c2111bc8a7a1..b100ca16a25f 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -51,7 +51,6 @@  struct svc_pool {
 /* bits for sp_flags */
 enum {
 	SP_TASK_PENDING,	/* still work to do even if no xprt is queued */
-	SP_CONGESTED,		/* all threads are busy, none idle */
 	SP_NEED_VICTIM,		/* One thread needs to agree to exit */
 	SP_VICTIM_REMAINS,	/* One thread needs to actually exit */
 };
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index d00a1a6b9616..6101c1e38eb0 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -2058,14 +2058,12 @@  TRACE_EVENT(svc_xprt_enqueue,
 );
 
 TRACE_DEFINE_ENUM(SP_TASK_PENDING);
-TRACE_DEFINE_ENUM(SP_CONGESTED);
 TRACE_DEFINE_ENUM(SP_NEED_VICTIM);
 TRACE_DEFINE_ENUM(SP_VICTIM_REMAINS);
 
 #define show_svc_pool_flags(x)						\
 	__print_flags(x, "|",						\
 		{ BIT(SP_TASK_PENDING),		"TASK_PENDING" },	\
-		{ BIT(SP_CONGESTED),		"CONGESTED" },		\
 		{ BIT(SP_NEED_VICTIM),		"NEED_VICTIM" },	\
 		{ BIT(SP_VICTIM_REMAINS),	"VICTIM_REMAINS" })
 DECLARE_EVENT_CLASS(svc_pool_scheduler_class,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 44a614d96d8d..9102cbd3976c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -724,7 +724,6 @@  void svc_pool_wake_idle_thread(struct svc_serv *serv, struct svc_pool *pool)
 
 	trace_svc_pool_starved(serv, pool);
 	percpu_counter_inc(&pool->sp_threads_starved);
-	set_bit(SP_CONGESTED, &pool->sp_flags);
 }
 EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 0ba16cbb998b..f294523595fa 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -736,8 +736,6 @@  static bool svc_rqst_wait_for_work(struct svc_rqst *rqstp)
 
 	if (rqst_should_sleep(rqstp)) {
 		set_current_state(TASK_IDLE);
-		smp_mb__before_atomic();
-		clear_bit(SP_CONGESTED, &pool->sp_flags);
 		spin_lock_bh(&pool->sp_lock);
 		list_add(&rqstp->rq_idle, &pool->sp_idle_threads);
 		spin_unlock_bh(&pool->sp_lock);
@@ -877,7 +875,7 @@  void svc_recv(struct svc_rqst *rqstp)
 		/* Normally we will wait up to 5 seconds for any required
 		 * cache information to be provided.
 		 */
-		if (test_bit(SP_CONGESTED, &pool->sp_flags))
+		if (list_empty(&pool->sp_idle_threads))
 			rqstp->rq_chandle.thread_wait = 5 * HZ;
 		else
 			rqstp->rq_chandle.thread_wait = 1 * HZ;