[1/2] rcu/kvfree: Remove useless monitor_todo flag

Message ID 20220602080644.432156-1-urezki@gmail.com (mailing list archive)
State Accepted
Commit 3b8484a8dab80a3aac69ef82874225604bdad5c0
Series [1/2] rcu/kvfree: Remove useless monitor_todo flag

Commit Message

Uladzislau Rezki June 2, 2022, 8:06 a.m. UTC
From: "Joel Fernandes (Google)" <joel@joelfernandes.org>

monitor_todo is not needed, as the work struct already tracks
whether work is pending. Just use the schedule_delayed_work() helper,
which does nothing if the work is already pending.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 kernel/rcu/tree.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)
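
The change leans on the fact that schedule_delayed_work() is already
idempotent: internally, queue_delayed_work() does a test_and_set of
WORK_STRUCT_PENDING_BIT and returns false, leaving the existing timer
untouched, when the work is still queued. A minimal sketch of the pattern
the patch relies on (arm_monitor() is a hypothetical name, not from the
patch; kfree_rcu_cpu and KFREE_DRAIN_JIFFIES are as defined in the diff
below):

    #include <linux/workqueue.h>

    /* Sketch only, not part of the patch. */
    static void arm_monitor(struct kfree_rcu_cpu *krcp)
    {
            /*
             * No separate monitor_todo flag is needed: calling this
             * while monitor_work is already pending is simply a no-op,
             * because schedule_delayed_work() refuses to requeue
             * pending work.
             */
            schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
    }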

Comments

Joel Fernandes June 2, 2022, 11:43 p.m. UTC | #1
On Thu, Jun 02, 2022 at 10:06:43AM +0200, Uladzislau Rezki (Sony) wrote:
> From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
> 
> monitor_todo is not needed, as the work struct already tracks
> whether work is pending. Just use the schedule_delayed_work() helper,
> which does nothing if the work is already pending.
> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  kernel/rcu/tree.c | 33 ++++++++++++++++-----------------
>  1 file changed, 16 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 222d59299a2a..fd16c0b46d9e 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3295,7 +3295,6 @@ struct kfree_rcu_cpu_work {
>   * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
>   * @lock: Synchronize access to this structure
>   * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
>   * @initialized: The @rcu_work fields have been initialized
>   * @count: Number of objects for which GP not started
>   * @bkvcache:
> @@ -3320,7 +3319,6 @@ struct kfree_rcu_cpu {
>  	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
>  	raw_spinlock_t lock;
>  	struct delayed_work monitor_work;
> -	bool monitor_todo;
>  	bool initialized;
>  	int count;
>  
> @@ -3500,6 +3498,18 @@ static void kfree_rcu_work(struct work_struct *work)
>  	}
>  }
>  
> +static bool
> +need_offload_krc(struct kfree_rcu_cpu *krcp)
> +{
> +	int i;
> +
> +	for (i = 0; i < FREE_N_CHANNELS; i++)
> +		if (krcp->bkvhead[i])
> +			return true;
> +
> +	return !!krcp->head;
> +}

Thanks for modifying my original patch to do this, and thanks for giving me
the attribution for the patch. This function is a nice addition.

For the patch in its entirety:
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

thanks,

 - Joel
Uladzislau Rezki June 3, 2022, 9:51 a.m. UTC | #2
> On Thu, Jun 02, 2022 at 10:06:43AM +0200, Uladzislau Rezki (Sony) wrote:
> > [...]
> > +static bool
> > +need_offload_krc(struct kfree_rcu_cpu *krcp)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < FREE_N_CHANNELS; i++)
> > +		if (krcp->bkvhead[i])
> > +			return true;
> > +
> > +	return !!krcp->head;
> > +}
> 
> Thanks for modifying my original patch to do this, and thanks for giving me
> the attribution for the patch. This function is a nice addition.
> 
It was you who did it :) Actually, the second patch depends on it, so
I decided to post it on your behalf with a slight modification, hoping
you would not mind.

>
> For the patch in its entirety:
> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> 
Thanks for the review!

--
Uladzislau Rezki
Joel Fernandes June 4, 2022, 3:07 a.m. UTC | #3
On Fri, Jun 03, 2022 at 11:51:34AM +0200, Uladzislau Rezki wrote:
> > [...]
> > 
> > Thanks for modifying my original patch to do this, and thanks for giving me
> > the attribution for the patch. This function is a nice addition.
> > 
> It was you who did it :) Actually, the second patch depends on it, so
> I decided to post it on your behalf with a slight modification, hoping
> you would not mind.

Yes I don't mind at all :) Thank you again for seeing it through!

> > For the patch in its entirety:
> > Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > 
> Thanks for the review!

Sure, any time. By the way, I am out in the Caribbean next week. I will
catch you all the week after.

Quick update on my side on the lazy CB stuff: I made some progress on using
the bypass lists for lazy CBs, and it's looking good and builds now. I fixed
a bug in my code where the idle loop was flushing lazy CBs on its way to
idle. I think it's probably O(workingdays) away from a v2 posting, assuming
all goes well.

thanks,

 - Joel
Paul E. McKenney June 4, 2022, 4:03 p.m. UTC | #4
On Thu, Jun 02, 2022 at 11:43:39PM +0000, Joel Fernandes wrote:
> On Thu, Jun 02, 2022 at 10:06:43AM +0200, Uladzislau Rezki (Sony) wrote:
> > [...]
> 
> Thanks for modifying my original patch to do this, and thanks for giving me
> the attribution for the patch. This function is a nice addition.
> 
> For the patch in its entirety:
> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

I pulled this one in for testing and further review, thank you both!

Given the description, I reversed the order of the Signed-off-by
tags to indicate that the patch came through Uladzislau from Joel.

							Thanx, Paul
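
For reference, the Signed-off-by chain is read top to bottom as the path
the patch traveled, with the author first; reversed as Paul describes, the
tags become:

    Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
    Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>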

Uladzislau Rezki June 5, 2022, 8:52 a.m. UTC | #5
> On Thu, Jun 02, 2022 at 11:43:39PM +0000, Joel Fernandes wrote:
> > On Thu, Jun 02, 2022 at 10:06:43AM +0200, Uladzislau Rezki (Sony) wrote:
> > > [...]
> > 
> > Thanks for modifying my original patch to do this, and thanks for giving me
> > the attribution for the patch. This function is a nice addition.
> > 
> > For the patch in its entirety:
> > Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> 
> I pulled this one in for testing and further review, thank you both!
> 
> Given the description, I reversed the order of the Signed-off-by
> tags to indicate that the patch came through Uladzislau from Joel.
> 
Thanks! The author is Joel; I might have messed up the "Signed-off-by" tags :)

--
Uladzislau Rezki

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 222d59299a2a..fd16c0b46d9e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3295,7 +3295,6 @@ struct kfree_rcu_cpu_work {
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
  * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
  * @bkvcache:
@@ -3320,7 +3319,6 @@ struct kfree_rcu_cpu {
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
-	bool monitor_todo;
 	bool initialized;
 	int count;
 
@@ -3500,6 +3498,18 @@ static void kfree_rcu_work(struct work_struct *work)
 	}
 }
 
+static bool
+need_offload_krc(struct kfree_rcu_cpu *krcp)
+{
+	int i;
+
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		if (krcp->bkvhead[i])
+			return true;
+
+	return !!krcp->head;
+}
+
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
  */
@@ -3556,9 +3566,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	// of the channels that is still busy we should rearm the
 	// work to repeat an attempt. Because previous batches are
 	// still in progress.
-	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
-		krcp->monitor_todo = false;
-	else
+	if (need_offload_krc(krcp))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3746,11 +3754,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	WRITE_ONCE(krcp->count, krcp->count + 1);
 
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
-	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-	    !krcp->monitor_todo) {
-		krcp->monitor_todo = true;
+	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	}
 
 unlock_return:
 	krc_this_cpu_unlock(krcp, flags);
@@ -3825,14 +3830,8 @@ void __init kfree_rcu_scheduler_running(void)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
-		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
-				krcp->monitor_todo) {
-			raw_spin_unlock_irqrestore(&krcp->lock, flags);
-			continue;
-		}
-		krcp->monitor_todo = true;
-		schedule_delayed_work_on(cpu, &krcp->monitor_work,
-					 KFREE_DRAIN_JIFFIES);
+		if (need_offload_krc(krcp))
+			schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }
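
One subtlety worth spelling out about the patched kfree_rcu_monitor(): it
re-arms its own delayed work from inside the handler. That is safe without
a flag because the workqueue core clears a work item's PENDING bit before
invoking its function, so the schedule_delayed_work() call made from within
the handler is never silently dropped. An annotated sketch of the patched
monitor (the comments are editorial and the drain logic is elided):

    static void kfree_rcu_monitor(struct work_struct *work)
    {
            struct kfree_rcu_cpu *krcp = container_of(work,
                            struct kfree_rcu_cpu, monitor_work.work);
            unsigned long flags;

            raw_spin_lock_irqsave(&krcp->lock, flags);

            /* ... try to detach bkvhead[]/head into a free batch ... */

            /*
             * need_offload_krc() reads krcp->bkvhead[] and krcp->head,
             * so krcp->lock must be held around it, as it is at both
             * call sites in the patch.
             *
             * PENDING was cleared before this handler was invoked, so
             * this call reliably queues another pass whenever any
             * channel is still busy.
             */
            if (need_offload_krc(krcp))
                    schedule_delayed_work(&krcp->monitor_work,
                                          KFREE_DRAIN_JIFFIES);

            raw_spin_unlock_irqrestore(&krcp->lock, flags);
    }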