[v2,05/16] rcu: Refactor code a bit in rcu_nocb_do_flush_bypass()

Message ID 20221122010421.3799681-5-paulmck@kernel.org
State Accepted
Commit 3d222a0c0cfef85bad2c9cff5d541836cb81cfbd
Series [v2,01/16] rcu: Simplify rcu_init_nohz() cpumask handling

Commit Message

Paul E. McKenney Nov. 22, 2022, 1:04 a.m. UTC
From: "Joel Fernandes (Google)" <joel@joelfernandes.org>

This consolidates the code a bit and makes it cleaner. Functionally it
is the same.

Reported-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/rcu/tree_nocb.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

Comments

Frederic Weisbecker Nov. 23, 2022, 3:59 p.m. UTC | #1
On Mon, Nov 21, 2022 at 05:04:10PM -0800, Paul E. McKenney wrote:
> From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
> 
> This consolidates the code a bit and makes it cleaner. Functionally it
> is the same.
> 
> Reported-by: Paul E. McKenney <paulmck@kernel.org>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---
>  kernel/rcu/tree_nocb.h | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index d6e4c076b0515..213daf81c057f 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -327,10 +327,11 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
>   *
>   * Note that this function always returns true if rhp is NULL.
>   */
> -static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> +static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
>  				     unsigned long j, bool lazy)
>  {
>  	struct rcu_cblist rcl;
> +	struct rcu_head *rhp = rhp_in;

Why that intermediate rhp_in?

>  
>  	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
>  	rcu_lockdep_assert_cblist_protected(rdp);
> @@ -345,16 +346,16 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
>  
>  	/*
>  	 * If the new CB requested was a lazy one, queue it onto the main
> -	 * ->cblist so we can take advantage of a sooner grade period.
> +	 * ->cblist so that we can take advantage of the grace-period that will
> +	 * happen regardless. But queue it onto the bypass list first so that
> +	 * the lazy CB is ordered with the existing CBs in the bypass list.
>  	 */
>  	if (lazy && rhp) {
> -		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, NULL);
> -		rcu_cblist_enqueue(&rcl, rhp);
> -		WRITE_ONCE(rdp->lazy_len, 0);
> -	} else {
> -		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
> -		WRITE_ONCE(rdp->lazy_len, 0);
> +		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
> +		rhp = NULL;
>  	}
> +	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
> +	WRITE_ONCE(rdp->lazy_len, 0);

Reviewed-by: Frederic Weisbecker <frederic@kernel.org>

Thanks.

>  
>  	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
>  	WRITE_ONCE(rdp->nocb_bypass_first, j);
> -- 
> 2.31.1.189.g2e36527f23
>
Paul E. McKenney Nov. 23, 2022, 5:54 p.m. UTC | #2
On Wed, Nov 23, 2022 at 04:59:29PM +0100, Frederic Weisbecker wrote:
> On Mon, Nov 21, 2022 at 05:04:10PM -0800, Paul E. McKenney wrote:
> > From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
> > 
> > This consolidates the code a bit and makes it cleaner. Functionally it
> > is the same.
> > 
> > Reported-by: Paul E. McKenney <paulmck@kernel.org>
> > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > ---
> >  kernel/rcu/tree_nocb.h | 17 +++++++++--------
> >  1 file changed, 9 insertions(+), 8 deletions(-)
> > 
> > diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> > index d6e4c076b0515..213daf81c057f 100644
> > --- a/kernel/rcu/tree_nocb.h
> > +++ b/kernel/rcu/tree_nocb.h
> > @@ -327,10 +327,11 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
> >   *
> >   * Note that this function always returns true if rhp is NULL.
> >   */
> > -static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> > +static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
> >  				     unsigned long j, bool lazy)
> >  {
> >  	struct rcu_cblist rcl;
> > +	struct rcu_head *rhp = rhp_in;
> 
> Why that intermediate rhp_in?

To avoid modifying the formal parameter, should the original value prove
useful, for example, for tracing or debugging.

> >  	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
> >  	rcu_lockdep_assert_cblist_protected(rdp);
> > @@ -345,16 +346,16 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
> >  
> >  	/*
> >  	 * If the new CB requested was a lazy one, queue it onto the main
> > -	 * ->cblist so we can take advantage of a sooner grade period.
> > +	 * ->cblist so that we can take advantage of the grace-period that will
> > +	 * happen regardless. But queue it onto the bypass list first so that
> > +	 * the lazy CB is ordered with the existing CBs in the bypass list.
> >  	 */
> >  	if (lazy && rhp) {
> > -		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, NULL);
> > -		rcu_cblist_enqueue(&rcl, rhp);
> > -		WRITE_ONCE(rdp->lazy_len, 0);
> > -	} else {
> > -		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
> > -		WRITE_ONCE(rdp->lazy_len, 0);
> > +		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
> > +		rhp = NULL;
> >  	}
> > +	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
> > +	WRITE_ONCE(rdp->lazy_len, 0);
> 
> Reviewed-by: Frederic Weisbecker <frederic@kernel.org>

Thank you!  I will apply this on my next rebase.

							Thanx, Paul

> Thanks.
> 
> >  
> >  	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
> >  	WRITE_ONCE(rdp->nocb_bypass_first, j);
> > -- 
> > 2.31.1.189.g2e36527f23
> >
Joel Fernandes Nov. 24, 2022, 1 a.m. UTC | #3
> On Nov 23, 2022, at 12:54 PM, Paul E. McKenney <paulmck@kernel.org> wrote:
> 
> On Wed, Nov 23, 2022 at 04:59:29PM +0100, Frederic Weisbecker wrote:
>> On Mon, Nov 21, 2022 at 05:04:10PM -0800, Paul E. McKenney wrote:
>>> From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
>>> 
>>> This consolidates the code a bit and makes it cleaner. Functionally it
>>> is the same.
>>> 
>>> Reported-by: Paul E. McKenney <paulmck@kernel.org>
>>> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
>>> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
>>> ---
>>> kernel/rcu/tree_nocb.h | 17 +++++++++--------
>>> 1 file changed, 9 insertions(+), 8 deletions(-)
>>> 
>>> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
>>> index d6e4c076b0515..213daf81c057f 100644
>>> --- a/kernel/rcu/tree_nocb.h
>>> +++ b/kernel/rcu/tree_nocb.h
>>> @@ -327,10 +327,11 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
>>>  *
>>>  * Note that this function always returns true if rhp is NULL.
>>>  */
>>> -static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
>>> +static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
>>>                     unsigned long j, bool lazy)
>>> {
>>>    struct rcu_cblist rcl;
>>> +    struct rcu_head *rhp = rhp_in;
>> 
>> Why that intermediate rhp_in?
> 
> To avoid modifying the formal parameter, should the original value prove
> useful, for example, for tracing or debugging.

So as not to reassign the function parameter and introduce bugs down the line (someone reading the code might think they passed a certain rhp, but the code is actually using something else later in the function).

Thanks.
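
To make that concrete, here is a minimal, self-contained C sketch of the pattern; do_flush(), enqueue_bypass(), and flush_bypass() are hypothetical stand-ins for illustration only, and just the rhp/rhp_in naming comes from the patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rcu_head { struct rcu_head *next; };

/* Hypothetical stand-ins so the sketch compiles; not the kernel APIs. */
static void enqueue_bypass(struct rcu_head *rhp) { (void)rhp; }
static bool flush_bypass(struct rcu_head *rhp) { return rhp == NULL; }

/*
 * The pattern under discussion: accept the callback as rhp_in, do all
 * updates on a local copy rhp, and never write to the formal parameter.
 */
static bool do_flush(struct rcu_head *rhp_in, bool lazy)
{
	struct rcu_head *rhp = rhp_in;	/* local copy; rhp_in stays pristine */

	if (lazy && rhp) {
		enqueue_bypass(rhp);	/* CB queued elsewhere... */
		rhp = NULL;		/* ...so drop it from the flush */
	}
	/* rhp_in still holds the caller's pointer, e.g. for tracing. */
	printf("caller passed %p, flushing with %p\n",
	       (void *)rhp_in, (void *)rhp);
	return flush_bypass(rhp);
}

int main(void)
{
	struct rcu_head cb = { NULL };

	return do_flush(&cb, true) ? 0 : 1;
}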



> 
>>>    WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
>>>    rcu_lockdep_assert_cblist_protected(rdp);
>>> @@ -345,16 +346,16 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
>>> 
>>>    /*
>>>     * If the new CB requested was a lazy one, queue it onto the main
>>> -     * ->cblist so we can take advantage of a sooner grade period.
>>> +     * ->cblist so that we can take advantage of the grace-period that will
>>> +     * happen regardless. But queue it onto the bypass list first so that
>>> +     * the lazy CB is ordered with the existing CBs in the bypass list.
>>>     */
>>>    if (lazy && rhp) {
>>> -        rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, NULL);
>>> -        rcu_cblist_enqueue(&rcl, rhp);
>>> -        WRITE_ONCE(rdp->lazy_len, 0);
>>> -    } else {
>>> -        rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
>>> -        WRITE_ONCE(rdp->lazy_len, 0);
>>> +        rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
>>> +        rhp = NULL;
>>>    }
>>> +    rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
>>> +    WRITE_ONCE(rdp->lazy_len, 0);
>> 
>> Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
> 
> Thank you!  I will apply this on my next rebase.
> 
>                            Thanx, Paul
> 
>> Thanks.
>> 
>>> 
>>>    rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
>>>    WRITE_ONCE(rdp->nocb_bypass_first, j);
>>> -- 
>>> 2.31.1.189.g2e36527f23
>>>

Patch

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index d6e4c076b0515..213daf81c057f 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -327,10 +327,11 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
  *
  * Note that this function always returns true if rhp is NULL.
  */
-static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
 				     unsigned long j, bool lazy)
 {
 	struct rcu_cblist rcl;
+	struct rcu_head *rhp = rhp_in;
 
 	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
 	rcu_lockdep_assert_cblist_protected(rdp);
@@ -345,16 +346,16 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 
 	/*
 	 * If the new CB requested was a lazy one, queue it onto the main
-	 * ->cblist so we can take advantage of a sooner grade period.
+	 * ->cblist so that we can take advantage of the grace-period that will
+	 * happen regardless. But queue it onto the bypass list first so that
+	 * the lazy CB is ordered with the existing CBs in the bypass list.
 	 */
 	if (lazy && rhp) {
-		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, NULL);
-		rcu_cblist_enqueue(&rcl, rhp);
-		WRITE_ONCE(rdp->lazy_len, 0);
-	} else {
-		rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
-		WRITE_ONCE(rdp->lazy_len, 0);
+		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
+		rhp = NULL;
 	}
+	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
+	WRITE_ONCE(rdp->lazy_len, 0);
 
 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
 	WRITE_ONCE(rdp->nocb_bypass_first, j);
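
For reference, here is how the touched region of rcu_nocb_do_flush_bypass() reads with the patch applied, reconstructed from the hunks above; lines the diff does not show are elided, and the comments on the new lines are editorial:

static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;	/* Work on a copy; rhp_in is never modified. */

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	/* ... unchanged lines elided ... */

	/*
	 * If the new CB requested was a lazy one, queue it onto the main
	 * ->cblist so that we can take advantage of the grace-period that will
	 * happen regardless. But queue it onto the bypass list first so that
	 * the lazy CB is ordered with the existing CBs in the bypass list.
	 */
	if (lazy && rhp) {
		/* Order the lazy CB behind the CBs already in the bypass list. */
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;	/* Already queued, so don't enqueue it again below. */
	}
	/* One flush path now covers both the lazy and the non-lazy case. */
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	/* ... remainder of the function elided ... */
}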