
[v2,7/9] SUNRPC: Don't disable BH's when taking sp_lock

Message ID 168842929557.139194.4420161035549339648.stgit@manet.1015granger.net (mailing list archive)
State New, archived
Series SUNRPC service thread scheduler optimizations

Commit Message

Chuck Lever July 4, 2023, 12:08 a.m. UTC
From: Chuck Lever <chuck.lever@oracle.com>

Consumers of sp_lock now all run in process context. We can avoid
the overhead of disabling bottom halves when taking this lock.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/svc_xprt.c |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
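
For background on the change itself: spin_lock_bh() is only needed when at
least one acquirer of a lock can run in bottom-half (softirq) context; if
every acquirer runs in process context, plain spin_lock() is sufficient and
avoids the cost of disabling and re-enabling softirqs. A minimal sketch of
the two cases (illustrative only; demo_lock and both functions below are
hypothetical, not code from this series):

static DEFINE_SPINLOCK(demo_lock);

/*
 * If demo_lock can also be taken from a softirq on this CPU, process
 * context must disable BHs while holding it. Otherwise a softirq could
 * fire while the lock is held, spin on it, and never return, so the
 * lock holder never runs again to release it: a self-deadlock.
 */
static void acquire_with_softirq_users(void)
{
	spin_lock_bh(&demo_lock);
	/* ... critical section ... */
	spin_unlock_bh(&demo_lock);
}

/* If no softirq path ever takes demo_lock, the cheaper form is safe. */
static void acquire_process_context_only(void)
{
	spin_lock(&demo_lock);
	/* ... critical section ... */
	spin_unlock(&demo_lock);
}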

Comments

NeilBrown July 4, 2023, 12:56 a.m. UTC | #1
On Tue, 04 Jul 2023, Chuck Lever wrote:
> From: Chuck Lever <chuck.lever@oracle.com>
> 
> Consumers of sp_lock now all run in process context. We can avoid
> the overhead of disabling bottom halves when taking this lock.

"now" suggests that something has recently changed so that sp_lock isn't
taken in bh context.  What was that change - I don't see it.

I think svc_data_ready() is called in bh, and that calls
svc_xprt_enqueue() which take sp_lock to add the xprt to ->sp_sockets. 

Is that not still the case?

NeilBrown


> [ ... remainder of quoted patch trimmed ... ]
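
The softirq-side path Neil is referring to looks roughly like the sketch
below. The name svc_data_ready() and its role as a socket data-ready
callback match net/sunrpc/svcsock.c, but the body here is a deliberate
simplification, not the actual implementation:

/* Runs in softirq (BH) context as the socket's ->sk_data_ready callback. */
static void svc_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = sk->sk_user_data;	/* simplified */

	if (svsk)
		svc_xprt_enqueue(&svsk->sk_xprt);	/* takes sp_lock */
}

With this patch applied, that svc_xprt_enqueue() call would take sp_lock
with plain spin_lock() from BH context. If an nfsd thread on the same CPU
already held sp_lock without having disabled BHs, the softirq would spin
on the lock forever and the thread could never run to release it.
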
Chuck Lever July 4, 2023, 1:48 a.m. UTC | #2
On Tue, Jul 04, 2023 at 10:56:08AM +1000, NeilBrown wrote:
> On Tue, 04 Jul 2023, Chuck Lever wrote:
> > From: Chuck Lever <chuck.lever@oracle.com>
> > 
> > Consumers of sp_lock now all run in process context. We can avoid
> > the overhead of disabling bottom halves when taking this lock.
> 
> "now" suggests that something has recently changed so that sp_lock isn't
> taken in bh context.  What was that change - I don't see it.
> 
> I think svc_data_ready() is called in bh, and that calls
> svc_xprt_enqueue() which take sp_lock to add the xprt to ->sp_sockets. 
> 
> Is that not still the case?

Darn, I forgot about that one. I'll drop this patch.

> NeilBrown

Patch

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ecbccf0d89b9..8ced7591ce07 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -472,9 +472,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server);
 
 	percpu_counter_inc(&pool->sp_sockets_queued);
-	spin_lock_bh(&pool->sp_lock);
+	spin_lock(&pool->sp_lock);
 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-	spin_unlock_bh(&pool->sp_lock);
+	spin_unlock(&pool->sp_lock);
 
 	rqstp = svc_pool_wake_idle_thread(xprt->xpt_server, pool);
 	if (!rqstp) {
@@ -496,14 +496,14 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 	if (list_empty(&pool->sp_sockets))
 		goto out;
 
-	spin_lock_bh(&pool->sp_lock);
+	spin_lock(&pool->sp_lock);
 	if (likely(!list_empty(&pool->sp_sockets))) {
 		xprt = list_first_entry(&pool->sp_sockets,
 					struct svc_xprt, xpt_ready);
 		list_del_init(&xprt->xpt_ready);
 		svc_xprt_get(xprt);
 	}
-	spin_unlock_bh(&pool->sp_lock);
+	spin_unlock(&pool->sp_lock);
 out:
 	return xprt;
 }
@@ -1116,15 +1116,15 @@ static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 	for (i = 0; i < serv->sv_nrpools; i++) {
 		pool = &serv->sv_pools[i];
 
-		spin_lock_bh(&pool->sp_lock);
+		spin_lock(&pool->sp_lock);
 		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
 			if (xprt->xpt_net != net)
 				continue;
 			list_del_init(&xprt->xpt_ready);
-			spin_unlock_bh(&pool->sp_lock);
+			spin_unlock(&pool->sp_lock);
 			return xprt;
 		}
-		spin_unlock_bh(&pool->sp_lock);
+		spin_unlock(&pool->sp_lock);
 	}
 	return NULL;
 }
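
A side note on svc_xprt_dequeue() above: the unlocked list_empty() test
before taking sp_lock is a common fast-path pattern. The unlocked read is
racy, but harmlessly so; a stale answer at worst skips one dequeue attempt
or takes the lock needlessly, and the state that matters is re-checked
under the lock. Reduced to its skeleton (illustrative, dequeue body
elided):

	/* Cheap unlocked peek: bail out without touching the lock. */
	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock(&pool->sp_lock);
	/* Re-check under the lock; the unlocked answer may be stale. */
	if (likely(!list_empty(&pool->sp_sockets))) {
		/* ... take the first xprt off the list ... */
	}
	spin_unlock(&pool->sp_lock);
out: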