[RFC,v1,12/14] rcu/kfree: remove useless monitor_todo flag

Message ID 20220512030442.2530552-13-joel@joelfernandes.org (mailing list archive)
State Superseded
Series Implement call_rcu_lazy() and miscellaneous fixes

Commit Message

Joel Fernandes May 12, 2022, 3:04 a.m. UTC
monitor_todo is not needed as the work struct already tracks whether
work is pending. Just use the delayed_work_pending() helper to check
for that.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 kernel/rcu/tree.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)
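
For reference, the core of the transformation in kvfree_call_rcu(),
excerpted from the patch below: delayed_work_pending() already reports
whether the delayed work item is queued, so the separate monitor_todo
flag adds no information.

	// Before: a dedicated flag guards against double-scheduling.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
	    !krcp->monitor_todo) {
		krcp->monitor_todo = true;
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
	}

	// After: ask the workqueue layer directly.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
	    !delayed_work_pending(&krcp->monitor_work))
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);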

Comments

Uladzislau Rezki May 13, 2022, 2:53 p.m. UTC | #1
> monitor_todo is not needed as the work struct already tracks whether
> work is pending. Just use the delayed_work_pending() helper to check
> for that.
> 
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  kernel/rcu/tree.c | 22 +++++++---------------
>  1 file changed, 7 insertions(+), 15 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 3baf29014f86..3828ac3bf1c4 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3155,7 +3155,6 @@ struct kfree_rcu_cpu_work {
>   * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
>   * @lock: Synchronize access to this structure
>   * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
>   * @initialized: The @rcu_work fields have been initialized
>   * @count: Number of objects for which GP not started
>   * @bkvcache:
> @@ -3180,7 +3179,6 @@ struct kfree_rcu_cpu {
>  	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
>  	raw_spinlock_t lock;
>  	struct delayed_work monitor_work;
> -	bool monitor_todo;
>  	bool initialized;
>  	int count;
>  
> @@ -3416,9 +3414,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
>  	// of the channels that is still busy we should rearm the
>  	// work to repeat an attempt. Because previous batches are
>  	// still in progress.
> -	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
> -		krcp->monitor_todo = false;
> -	else
> +	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
>  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
>  
>  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
> @@ -3607,10 +3603,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
>  
>  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
>  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> -	    !krcp->monitor_todo) {
> -		krcp->monitor_todo = true;
> +	    !delayed_work_pending(&krcp->monitor_work))
>  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> -	}
>  
>  unlock_return:
>  	krc_this_cpu_unlock(krcp, flags);
> @@ -3685,14 +3679,12 @@ void __init kfree_rcu_scheduler_running(void)
>  		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
>  
>  		raw_spin_lock_irqsave(&krcp->lock, flags);
> -		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
> -				krcp->monitor_todo) {
> -			raw_spin_unlock_irqrestore(&krcp->lock, flags);
> -			continue;
> +		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) {
> +			if (delayed_work_pending(&krcp->monitor_work)) {
> +				schedule_delayed_work_on(cpu, &krcp->monitor_work,
> +						KFREE_DRAIN_JIFFIES);
> +			}
>  		}
> -		krcp->monitor_todo = true;
> -		schedule_delayed_work_on(cpu, &krcp->monitor_work,
> -					 KFREE_DRAIN_JIFFIES);
>  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  	}
>  }
> -- 
>
Looks good to me at first glance, but let me have a look at it more
closely.

--
Uladzislau Rezki
Joel Fernandes May 14, 2022, 2:35 p.m. UTC | #2
On Fri, May 13, 2022 at 04:53:05PM +0200, Uladzislau Rezki wrote:
> > monitor_todo is not needed as the work struct already tracks whether
> > work is pending. Just use the delayed_work_pending() helper to check
> > for that.
> > 
> > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > ---
> >  kernel/rcu/tree.c | 22 +++++++---------------
> >  1 file changed, 7 insertions(+), 15 deletions(-)
> > 
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 3baf29014f86..3828ac3bf1c4 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -3155,7 +3155,6 @@ struct kfree_rcu_cpu_work {
> >   * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
> >   * @lock: Synchronize access to this structure
> >   * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> > - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
> >   * @initialized: The @rcu_work fields have been initialized
> >   * @count: Number of objects for which GP not started
> >   * @bkvcache:
> > @@ -3180,7 +3179,6 @@ struct kfree_rcu_cpu {
> >  	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
> >  	raw_spinlock_t lock;
> >  	struct delayed_work monitor_work;
> > -	bool monitor_todo;
> >  	bool initialized;
> >  	int count;
> >  
> > @@ -3416,9 +3414,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
> >  	// of the channels that is still busy we should rearm the
> >  	// work to repeat an attempt. Because previous batches are
> >  	// still in progress.
> > -	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
> > -		krcp->monitor_todo = false;
> > -	else
> > +	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
> >  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> >  
> >  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
> > @@ -3607,10 +3603,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> >  
> >  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
> >  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> > -	    !krcp->monitor_todo) {
> > -		krcp->monitor_todo = true;
> > +	    !delayed_work_pending(&krcp->monitor_work))
> >  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> > -	}
> >  
> >  unlock_return:
> >  	krc_this_cpu_unlock(krcp, flags);
> > @@ -3685,14 +3679,12 @@ void __init kfree_rcu_scheduler_running(void)
> >  		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
> >  
> >  		raw_spin_lock_irqsave(&krcp->lock, flags);
> > -		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
> > -				krcp->monitor_todo) {
> > -			raw_spin_unlock_irqrestore(&krcp->lock, flags);
> > -			continue;
> > +		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) {
> > +			if (delayed_work_pending(&krcp->monitor_work)) {
> > +				schedule_delayed_work_on(cpu, &krcp->monitor_work,
> > +						KFREE_DRAIN_JIFFIES);
> > +			}
> >  		}
> > -		krcp->monitor_todo = true;
> > -		schedule_delayed_work_on(cpu, &krcp->monitor_work,
> > -					 KFREE_DRAIN_JIFFIES);
> >  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
> >  	}
> >  }
> > -- 
> >
> Looks good to me at first glance, but let me have a look at it more
> closely.

Thanks, I appreciate it.

 - Joel
Uladzislau Rezki May 14, 2022, 7:48 p.m. UTC | #3
> On Fri, May 13, 2022 at 04:53:05PM +0200, Uladzislau Rezki wrote:
> > > monitor_todo is not needed as the work struct already tracks whether
> > > work is pending. Just use the delayed_work_pending() helper to check
> > > for that.
> > > 
> > > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > > ---
> > >  kernel/rcu/tree.c | 22 +++++++---------------
> > >  1 file changed, 7 insertions(+), 15 deletions(-)
> > > 
> > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > > index 3baf29014f86..3828ac3bf1c4 100644
> > > --- a/kernel/rcu/tree.c
> > > +++ b/kernel/rcu/tree.c
> > > @@ -3155,7 +3155,6 @@ struct kfree_rcu_cpu_work {
> > >   * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
> > >   * @lock: Synchronize access to this structure
> > >   * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> > > - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
> > >   * @initialized: The @rcu_work fields have been initialized
> > >   * @count: Number of objects for which GP not started
> > >   * @bkvcache:
> > > @@ -3180,7 +3179,6 @@ struct kfree_rcu_cpu {
> > >  	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
> > >  	raw_spinlock_t lock;
> > >  	struct delayed_work monitor_work;
> > > -	bool monitor_todo;
> > >  	bool initialized;
> > >  	int count;
> > >  
> > > @@ -3416,9 +3414,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
> > >  	// of the channels that is still busy we should rearm the
> > >  	// work to repeat an attempt. Because previous batches are
> > >  	// still in progress.
> > > -	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
> > > -		krcp->monitor_todo = false;
> > > -	else
> > > +	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
>
Can we place those three checks into a separate inline function, say
krc_needs_offload(), since the check is used in two places?
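
A minimal sketch of what such a helper could look like (the name
krc_needs_offload() comes from the suggestion above, not from the
posted patch):

static inline bool krc_needs_offload(struct kfree_rcu_cpu *krcp)
{
	/* True if any channel still holds objects awaiting a grace period. */
	return krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head;
}

Both kfree_rcu_monitor() and kfree_rcu_scheduler_running() could then
call it instead of open-coding the three checks.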

> > >  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> > >  
> > >  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
> > > @@ -3607,10 +3603,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> > >  
> > >  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
> > >  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> > > -	    !krcp->monitor_todo) {
> > > -		krcp->monitor_todo = true;
> > > +	    !delayed_work_pending(&krcp->monitor_work))
>
I think checking whether it is pending or not does not make much sense;
schedule_delayed_work() already checks internally whether the work can
be queued.
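
Indeed, schedule_delayed_work() tests the work's pending bit internally
and simply returns false without queueing if the work is still pending,
so the call site could arguably drop the check entirely (a sketch, not
the posted code):

	// Set timer to drain after KFREE_DRAIN_JIFFIES.
	// schedule_delayed_work() is a no-op if monitor_work is already queued.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);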

> > >  		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> > > -	}
> > >  
> > >  unlock_return:
> > >  	krc_this_cpu_unlock(krcp, flags);
> > > @@ -3685,14 +3679,12 @@ void __init kfree_rcu_scheduler_running(void)
> > >  		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
> > >  
> > >  		raw_spin_lock_irqsave(&krcp->lock, flags);
> > > -		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
> > > -				krcp->monitor_todo) {
> > > -			raw_spin_unlock_irqrestore(&krcp->lock, flags);
> > > -			continue;
> > > +		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) {
Same here. Moving this into the separate function makes sense, IMHO.

> > > +			if (delayed_work_pending(&krcp->monitor_work)) {
Same here. Should we check it here?

> > > +				schedule_delayed_work_on(cpu, &krcp->monitor_work,
> > > +						KFREE_DRAIN_JIFFIES);
> > > +			}
> > >  		}
> > > -		krcp->monitor_todo = true;
> > > -		schedule_delayed_work_on(cpu, &krcp->monitor_work,
> > > -					 KFREE_DRAIN_JIFFIES);
> > >  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
> > >  	}
> > >  }
> > > -- 
> > >
> > Looks good to me at first glance, but let me have a look at it more
> > closely.
> 
> Thanks, I appreciate it.
> 
One design change after this patch is that the drain work can be queued
even though there is already nothing to drain. I do not see it as a big
issue because the work will just bail out, so I tend toward the
simplification.

The monitor_todo flag guarantees that a kvfree_rcu() caller will not
schedule any work until the "monitor work" completes its job; if there
is still something to do, the work rearms itself.
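
For illustration, a condensed sketch of the rearm path in
kfree_rcu_monitor() after this patch (simplified from the diff above;
the detach step is elided) shows why an "empty" run is harmless: with
no objects in any channel, the work takes no action and does not rearm
itself.

	raw_spin_lock_irqsave(&krcp->lock, flags);

	/* ... try to detach bkvhead[]/head into a batch for a grace period ... */

	/* Rearm only if some channel still holds objects. */
	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);

	raw_spin_unlock_irqrestore(&krcp->lock, flags);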

--
Uladzislau Rezki

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3baf29014f86..3828ac3bf1c4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3155,7 +3155,6 @@  struct kfree_rcu_cpu_work {
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
  * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
  * @bkvcache:
@@ -3180,7 +3179,6 @@  struct kfree_rcu_cpu {
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
-	bool monitor_todo;
 	bool initialized;
 	int count;
 
@@ -3416,9 +3414,7 @@  static void kfree_rcu_monitor(struct work_struct *work)
 	// of the channels that is still busy we should rearm the
 	// work to repeat an attempt. Because previous batches are
 	// still in progress.
-	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
-		krcp->monitor_todo = false;
-	else
+	if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3607,10 +3603,8 @@  void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-	    !krcp->monitor_todo) {
-		krcp->monitor_todo = true;
+	    !delayed_work_pending(&krcp->monitor_work))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-	}
 
 unlock_return:
 	krc_this_cpu_unlock(krcp, flags);
@@ -3685,14 +3679,12 @@  void __init kfree_rcu_scheduler_running(void)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
-		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
-				krcp->monitor_todo) {
-			raw_spin_unlock_irqrestore(&krcp->lock, flags);
-			continue;
+		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) {
+			if (delayed_work_pending(&krcp->monitor_work)) {
+				schedule_delayed_work_on(cpu, &krcp->monitor_work,
+						KFREE_DRAIN_JIFFIES);
+			}
 		}
-		krcp->monitor_todo = true;
-		schedule_delayed_work_on(cpu, &krcp->monitor_work,
-					 KFREE_DRAIN_JIFFIES);
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }