diff mbox series

[2/2] rcu/kvfree: Introduce KFREE_DRAIN_JIFFIES_[MAX/MIN] interval

Message ID 20220602080644.432156-2-urezki@gmail.com (mailing list archive)
State New, archived
Headers show
Series [1/2] rcu/kvfree: Remove useless monitor_todo flag | expand

Commit Message

Uladzislau Rezki June 2, 2022, 8:06 a.m. UTC
Currently the monitor work is scheduled with a fixed interval of
HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
is low utilization of page slots in some scenarios. A page can store
up to 512 records. For example, on an Android system it can look like:

<snip>
  kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
  kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
  kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
  kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
  kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
  kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
  kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
  kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
  kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
  kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
  kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
  kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
  kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
  kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
  kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
<snip>

where a page carries only a few records of memory to be reclaimed. In
order to improve batching and make utilization more efficient, the patch
introduces a drain interval that can be set to either
KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
adjusted when a flood is detected: in that case memory reclaim occurs
more often, whereas in mostly idle cases the interval is set to its
maximum timeout, which improves the utilization of page slots.
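
The interval selection is quoted here from the new
schedule_delayed_monitor_work() helper in the diff; it means draining
every 20 milliseconds under a flood and only once per second when
mostly idle:

<snip>
	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ?
		KFREE_DRAIN_JIFFIES_MIN : KFREE_DRAIN_JIFFIES_MAX;
<snip>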

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

Comments

Joel Fernandes June 2, 2022, 11:32 p.m. UTC | #1
On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> Currently the monitor work is scheduled with a fixed interval of
> HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> is low utilization of page slots in some scenarios. A page can store
> up to 512 records. For example, on an Android system it can look like:
> 
> <snip>
>   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
>   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
>   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
>   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
>   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
>   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
>   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
>   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
>   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
>   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
>   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
>   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
>   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
>   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
>   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> <snip>
> 
> where a page carries only a few records of memory to be reclaimed. In
> order to improve batching and make utilization more efficient, the patch
> introduces a drain interval that can be set to either
> KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> adjusted when a flood is detected: in that case memory reclaim occurs
> more often, whereas in mostly idle cases the interval is set to its
> maximum timeout, which improves the utilization of page slots.
> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

thanks,

 - Joel



> ---
>  kernel/rcu/tree.c | 29 +++++++++++++++++++++++++----
>  1 file changed, 25 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index fd16c0b46d9e..c02a64995b85 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3249,7 +3249,8 @@ EXPORT_SYMBOL_GPL(call_rcu);
>  
>  
>  /* Maximum number of jiffies to wait before draining a batch. */
> -#define KFREE_DRAIN_JIFFIES (HZ / 50)
> +#define KFREE_DRAIN_JIFFIES_MAX (HZ)
> +#define KFREE_DRAIN_JIFFIES_MIN (HZ / 50)
>  #define KFREE_N_BATCHES 2
>  #define FREE_N_CHANNELS 2
>  
> @@ -3510,6 +3511,26 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
>  	return !!krcp->head;
>  }
>  
> +static void
> +schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
> +{
> +	long delay, delay_left;
> +
> +	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ?
> +		KFREE_DRAIN_JIFFIES_MIN:KFREE_DRAIN_JIFFIES_MAX;
> +
> +	if (delayed_work_pending(&krcp->monitor_work)) {
> +		delay_left = krcp->monitor_work.timer.expires - jiffies;
> +
> +		if (delay < delay_left)
> +			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
> +
> +		return;
> +	}
> +
> +	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
> +}
> +
>  /*
>   * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
>   */
> @@ -3567,7 +3588,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
>  	// work to repeat an attempt. Because previous batches are
>  	// still in progress.
>  	if (need_offload_krc(krcp))
> -		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +		schedule_delayed_monitor_work(krcp);
>  
>  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  }
> @@ -3755,7 +3776,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
>  
>  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
>  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
> -		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +		schedule_delayed_monitor_work(krcp);
>  
>  unlock_return:
>  	krc_this_cpu_unlock(krcp, flags);
> @@ -3831,7 +3852,7 @@ void __init kfree_rcu_scheduler_running(void)
>  
>  		raw_spin_lock_irqsave(&krcp->lock, flags);
>  		if (need_offload_krc(krcp))
> -			schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +			schedule_delayed_monitor_work(krcp);
>  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  	}
>  }
> -- 
> 2.30.2
>
Uladzislau Rezki June 3, 2022, 9:55 a.m. UTC | #2
On Thu, Jun 02, 2022 at 11:32:23PM +0000, Joel Fernandes wrote:
> On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > Currently the monitor work is scheduled with a fixed interval of
> > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > is low utilization of page slots in some scenarios. A page can store
> > up to 512 records. For example, on an Android system it can look like:
> > 
> > <snip>
> >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > <snip>
> > 
> > where a page carries only a few records of memory to be reclaimed. In
> > order to improve batching and make utilization more efficient, the patch
> > introduces a drain interval that can be set to either
> > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > adjusted when a flood is detected: in that case memory reclaim occurs
> > more often, whereas in mostly idle cases the interval is set to its
> > maximum timeout, which improves the utilization of page slots.
> > 
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> 
> Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> 
Thanks!

This patch makes the intervals hard-coded in some sense, so you cannot change
them at runtime, only by recompiling. If there is a need or a request, we can
expose both as module_param().

If we are to do that, we can just add one extra patch on top of it.
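
For example (hypothetical, just to show the shape it could take):

	static long kfree_drain_jiffies_max = HZ;
	static long kfree_drain_jiffies_min = HZ / 50;
	module_param(kfree_drain_jiffies_max, long, 0644);
	module_param(kfree_drain_jiffies_min, long, 0644);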

--
Uladzislau Rezki
Joel Fernandes June 4, 2022, 3:03 a.m. UTC | #3
On Fri, Jun 03, 2022 at 11:55:10AM +0200, Uladzislau Rezki wrote:
> On Thu, Jun 02, 2022 at 11:32:23PM +0000, Joel Fernandes wrote:
> > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > Currently the monitor work is scheduled with a fixed interval of
> > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > is low utilization of page slots in some scenarios. A page can store
> > > up to 512 records. For example, on an Android system it can look like:
> > > 
> > > <snip>
> > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > <snip>
> > > 
> > > where a page carries only a few records of memory to be reclaimed. In
> > > order to improve batching and make utilization more efficient, the patch
> > > introduces a drain interval that can be set to either
> > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > more often, whereas in mostly idle cases the interval is set to its
> > > maximum timeout, which improves the utilization of page slots.
> > > 
> > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > 
> > Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > 
> Thanks!
> 
> This patch makes the intervals hard-coded in some sense, so you cannot change
> them at runtime, only by recompiling. If there is a need or a request, we can
> expose both as module_param().

Yes, this seems a good first step.

> If we are to do that, we can just add one extra patch on top of it.

Yes.

thanks,

 - Joel
Paul E. McKenney June 4, 2022, 3:51 p.m. UTC | #4
On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> Currently the monitor work is scheduled with a fixed interval of
> HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> is low utilization of page slots in some scenarios. A page can store
> up to 512 records. For example, on an Android system it can look like:
> 
> <snip>
>   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
>   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
>   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
>   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
>   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
>   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
>   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
>   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
>   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
>   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
>   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
>   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
>   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
>   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
>   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> <snip>
> 
> where a page carries only a few records of memory to be reclaimed. In
> order to improve batching and make utilization more efficient, the patch
> introduces a drain interval that can be set to either
> KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> adjusted when a flood is detected: in that case memory reclaim occurs
> more often, whereas in mostly idle cases the interval is set to its
> maximum timeout, which improves the utilization of page slots.
> 
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

That does look like a problem well worth solving!

But I am missing one thing.  If we are having a callback flood, why do we
need a shorter timeout?  Wouldn't a check on the number of blocks queued
be simpler, more direct, and provide faster response to the start of a
callback flood?

							Thanx, Paul

> ---
>  kernel/rcu/tree.c | 29 +++++++++++++++++++++++++----
>  1 file changed, 25 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index fd16c0b46d9e..c02a64995b85 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3249,7 +3249,8 @@ EXPORT_SYMBOL_GPL(call_rcu);
>  
>  
>  /* Maximum number of jiffies to wait before draining a batch. */
> -#define KFREE_DRAIN_JIFFIES (HZ / 50)
> +#define KFREE_DRAIN_JIFFIES_MAX (HZ)
> +#define KFREE_DRAIN_JIFFIES_MIN (HZ / 50)
>  #define KFREE_N_BATCHES 2
>  #define FREE_N_CHANNELS 2
>  
> @@ -3510,6 +3511,26 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
>  	return !!krcp->head;
>  }
>  
> +static void
> +schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
> +{
> +	long delay, delay_left;
> +
> +	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ?
> +		KFREE_DRAIN_JIFFIES_MIN:KFREE_DRAIN_JIFFIES_MAX;
> +
> +	if (delayed_work_pending(&krcp->monitor_work)) {
> +		delay_left = krcp->monitor_work.timer.expires - jiffies;
> +
> +		if (delay < delay_left)
> +			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
> +
> +		return;
> +	}
> +
> +	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
> +}
> +
>  /*
>   * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
>   */
> @@ -3567,7 +3588,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
>  	// work to repeat an attempt. Because previous batches are
>  	// still in progress.
>  	if (need_offload_krc(krcp))
> -		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +		schedule_delayed_monitor_work(krcp);
>  
>  	raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  }
> @@ -3755,7 +3776,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
>  
>  	// Set timer to drain after KFREE_DRAIN_JIFFIES.
>  	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
> -		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +		schedule_delayed_monitor_work(krcp);
>  
>  unlock_return:
>  	krc_this_cpu_unlock(krcp, flags);
> @@ -3831,7 +3852,7 @@ void __init kfree_rcu_scheduler_running(void)
>  
>  		raw_spin_lock_irqsave(&krcp->lock, flags);
>  		if (need_offload_krc(krcp))
> -			schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> +			schedule_delayed_monitor_work(krcp);
>  		raw_spin_unlock_irqrestore(&krcp->lock, flags);
>  	}
>  }
> -- 
> 2.30.2
>
Uladzislau Rezki June 5, 2022, 9:10 a.m. UTC | #5
> On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > Currently the monitor work is scheduled with a fixed interval of
> > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > is low utilization of page slots in some scenarios. A page can store
> > up to 512 records. For example, on an Android system it can look like:
> > 
> > <snip>
> >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > <snip>
> > 
> > where a page carries only a few records of memory to be reclaimed. In
> > order to improve batching and make utilization more efficient, the patch
> > introduces a drain interval that can be set to either
> > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > adjusted when a flood is detected: in that case memory reclaim occurs
> > more often, whereas in mostly idle cases the interval is set to its
> > maximum timeout, which improves the utilization of page slots.
> > 
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> 
> That does look like a problem well worth solving!
>
Agree, better ideas make a better final solution :)

> 
> But I am missing one thing. If we are having a callback flood, why do we
> need a shorter timeout?
>
To offload faster, because otherwise we run into the classical issue: a
low-memory condition resulting in OOM.

>
> Wouldn't a check on the number of blocks queued be simpler, more direct,
> and provide faster response to the start of a callback flood?
>
I rely on krcp->count because we cannot always store the pointer in the page
slots. We cannot allocate a page in the caller context, thus we use a
page-cache worker that fills the cache in normal context. While it populates
the cache, pointers are temporarily queued to the linked list.
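
Roughly, the two paths look like this (a simplified paraphrase of
kvfree_call_rcu(), where add_ptr_to_page_slot() stands in for the real
helper):

	success = add_ptr_to_page_slot(krcp, ptr);	// try a page slot first
	if (!success) {
		run_page_cache_worker(krcp);		// refill the cache asynchronously
		head->func = func;			// meanwhile queue onto the
		head->next = krcp->head;		// rcu_head linked list
		krcp->head = head;
	}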

Any thoughts?

--
Uladzislau Rezki
Paul E. McKenney June 7, 2022, 3:47 a.m. UTC | #6
On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > Currently the monitor work is scheduled with a fixed interval of
> > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > is low utilization of page slots in some scenarios. A page can store
> > > up to 512 records. For example, on an Android system it can look like:
> > > 
> > > <snip>
> > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > <snip>
> > > 
> > > where a page carries only a few records of memory to be reclaimed. In
> > > order to improve batching and make utilization more efficient, the patch
> > > introduces a drain interval that can be set to either
> > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > more often, whereas in mostly idle cases the interval is set to its
> > > maximum timeout, which improves the utilization of page slots.
> > > 
> > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > 
> > That does look like a problem well worth solving!
> >
> Agree, better ideas make a better final solution :)
> 
> > 
> > But I am missing one thing. If we are having a callback flood, why do we
> > need a shorter timeout?
> >
> To offload faster, because otherwise we run into the classical issue: a
> low-memory condition resulting in OOM.

But doesn't each callback queued during the flood give us an opportunity
to react to the flood?  That will be way more fine-grained than any
reasonable timer, right?  Or am I missing something?

I do agree that the action would often need to be indirect to avoid the
memory-allocation-state hassles, but we already can do that, either via
an extremely short-term hrtimer or something like irq-work.
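
For example (untested, and kick_iw is an invented field, but something
like this):

	static void kfree_rcu_kick(struct irq_work *iw)
	{
		struct kfree_rcu_cpu *krcp = container_of(iw, struct kfree_rcu_cpu, kick_iw);

		queue_delayed_work(system_wq, &krcp->monitor_work, 0);
	}

	/* At init time:	init_irq_work(&krcp->kick_iw, kfree_rcu_kick);	*/
	/* From the flood path:	irq_work_queue(&krcp->kick_iw);			*/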

> > Wouldn't a check on the number of blocks queued be simpler, more direct,
> > and provide faster response to the start of a callback flood?
> >
> I rely on krcp->count because we cannot always store the pointer in the page
> slots. We cannot allocate a page in the caller context, thus we use a
> page-cache worker that fills the cache in normal context. While it populates
> the cache, pointers are temporarily queued to the linked list.
> 
> Any thoughts?

There are a great many ways to approach this.  One of them is to maintain
a per-CPU free-running counter of kvfree_rcu() calls, and to reset this
counter each jiffy.
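
Something like this on each kvfree_rcu() call, perhaps (untested, with
invented names):

	unsigned long j = jiffies;

	if (j != krcp->flood_jiffy) {		/* New jiffy: reset the count. */
		krcp->flood_jiffy = j;
		krcp->flood_count = 0;
	}
	if (++krcp->flood_count > KVFREE_FLOOD_LIM_PER_JIFFY)
		mod_delayed_work(system_wq, &krcp->monitor_work, 0);	/* Flood: drain now. */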

Or am I missing a trick here?

							Thanx, Paul
Uladzislau Rezki June 9, 2022, 1:10 p.m. UTC | #7
On Tue, Jun 7, 2022 at 5:47 AM Paul E. McKenney <paulmck@kernel.org> wrote:
>
> On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > > Currently the monitor work is scheduled with a fixed interval of
> > > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > > is low utilization of page slots in some scenarios. A page can store
> > > > up to 512 records. For example, on an Android system it can look like:
> > > >
> > > > <snip>
> > > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > > <snip>
> > > >
> > > > where a page carries only a few records of memory to be reclaimed. In
> > > > order to improve batching and make utilization more efficient, the patch
> > > > introduces a drain interval that can be set to either
> > > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > > more often, whereas in mostly idle cases the interval is set to its
> > > > maximum timeout, which improves the utilization of page slots.
> > > >
> > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > >
> > > That does look like a problem well worth solving!
> > >
> > Agree, better ideas make a better final solution :)
> >
> > >
> > > But I am missing one thing. If we are having a callback flood, why do we
> > > need a shorter timeout?
> > >
> > To offload faster, because otherwise we run into the classical issue: a
> > low-memory condition resulting in OOM.
>
> But doesn't each callback queued during the flood give us an opportunity
> to react to the flood?  That will be way more fine-grained than any
> reasonable timer, right?  Or am I missing something?
>
We can set the timer to zero, or to the current "jiffies", to initiate the
offloading if the page is full. In that sense it probably makes sense to
propagate those two attributes to user space, so that the user can
configure the min/max drain interval.

Or we can deal only with a fixed interval, exposed via sysfs so the user
can control it. In that case we can get rid of the MIN one and just
trigger the timer when the page is full. I think this approach is better.
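
A hypothetical one-liner for the "page is full" case could be (untested):

	/* Kick the monitor work right away once a page fills up. */
	if (READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR)
		mod_delayed_work(system_wq, &krcp->monitor_work, 0);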

>
> I do agree that the action would often need to be indirect to avoid the
> memory-allocation-state hassles, but we already can do that, either via
> an extremely short-term hrtimer or something like irq-work.
>
> > > Wouldn't a check on the number of blocks queued be simpler, more direct,
> > > and provide faster response to the start of a callback flood?
> > >
> > I rely on krcp->count because we cannot always store the pointer in the page
> > slots. We cannot allocate a page in the caller context, thus we use a
> > page-cache worker that fills the cache in normal context. While it populates
> > the cache, pointers are temporarily queued to the linked list.
> >
> > Any thoughts?
>
> There are a great many ways to approach this.  One of them is to maintain
> a per-CPU free-running counter of kvfree_rcu() calls, and to reset this
> counter each jiffy.
>
> Or am I missing a trick here?
>
Do you mean to have a per-CPU timer that checks the per-CPU freed counter
and schedules the work when it is needed? Or have I missed your point?
Joel Fernandes June 10, 2022, 4:45 p.m. UTC | #8
Hi Vlad, Paul,

On Thu, Jun 09, 2022 at 03:10:57PM +0200, Uladzislau Rezki wrote:
> On Tue, Jun 7, 2022 at 5:47 AM Paul E. McKenney <paulmck@kernel.org> wrote:
> >
> > On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > > > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > > > Currently the monitor work is scheduled with a fixed interval of
> > > > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > > > is low utilization of page slots in some scenarios. A page can store
> > > > > up to 512 records. For example, on an Android system it can look like:
> > > > >
> > > > > <snip>
> > > > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > > > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > > > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > > > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > > > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > > > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > > > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > > > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > > > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > > > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > > > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > > > <snip>
> > > > >
> > > > > where a page carries only a few records of memory to be reclaimed. In
> > > > > order to improve batching and make utilization more efficient, the patch
> > > > > introduces a drain interval that can be set to either
> > > > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > > > more often, whereas in mostly idle cases the interval is set to its
> > > > > maximum timeout, which improves the utilization of page slots.
> > > > >
> > > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > > >
> > > > That does look like a problem well worth solving!
> > > >
> > > Agree, better ideas make a better final solution :)
> > >
> > > >
> > > > But I am missing one thing. If we are having a callback flood, why do we
> > > > need a shorter timeout?
> > > >
> > > To offload faster, because otherwise we run into the classical issue: a
> > > low-memory condition resulting in OOM.
> >
> > But doesn't each callback queued during the flood give us an opportunity
> > to react to the flood?  That will be way more fine-grained than any
> > reasonable timer, right?  Or am I missing something?
> >
> We can set the timer to zero, or to the current "jiffies", to initiate the
> offloading if the page is full. In that sense it probably makes sense to
> propagate those two attributes to user space, so that the user can
> configure the min/max drain interval.
> 
> Or we can deal only with a fixed interval, exposed via sysfs so the user
> can control it. In that case we can get rid of the MIN one and just
> trigger the timer when the page is full. I think this approach is better.

Yes, I also think triggering the timer with a zero timeout is better. Can you
(Vlad) accomplish that by just calling the timer callback inline, instead of
queuing a timer? I imagine you would just do queue_work() instead of
queue_delayed_work() in this scenario.
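
An untested sketch of what I mean, reusing the check from your patch:

	if (READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR)
		/* Flood detected: run the monitor work as soon as possible. */
		queue_work(system_wq, &krcp->monitor_work.work);
	else
		queue_delayed_work(system_wq, &krcp->monitor_work,
				   KFREE_DRAIN_JIFFIES_MAX);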

> > I do agree that the action would often need to be indirect to avoid the
> > memory-allocation-state hassles, but we already can do that, either via
> > an extremely short-term hrtimer or something like irq-work.
> >
> > > > Wouldn't a check on the number of blocks queued be simpler, more direct,
> > > > and provide faster response to the start of a callback flood?
> > > >
> > > I rely on krcp->count because we cannot always store the pointer in the page
> > > slots. We cannot allocate a page in the caller context, thus we use a
> > > page-cache worker that fills the cache in normal context. While it populates
> > > the cache, pointers are temporarily queued to the linked list.
> > >
> > > Any thoughts?
> >
> > There are a great many ways to approach this.  One of them is to maintain
> > a per-CPU free-running counter of kvfree_rcu() calls, and to reset this
> > counter each jiffy.
> >
> > Or am I missing a trick here?
> >
> Do you mean to have a per-CPU timer that checks the per-CPU freed counter
> and schedules the work when it is needed? Or have I missed your point?

I think he (Paul) is describing how 'flood detection' can work, similar to how
the bypass list code is implemented. There he maintains a count; only when it
exceeds a limit will callbacks be queued on to the bypass list.

This code:

        // If we have advanced to a new jiffy, reset counts to allow
        // moving back from ->nocb_bypass to ->cblist.
        if (j == rdp->nocb_nobypass_last) {
                c = rdp->nocb_nobypass_count + 1;
        } else {
                WRITE_ONCE(rdp->nocb_nobypass_last, j);
                c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
                if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
                                 nocb_nobypass_lim_per_jiffy))
                        c = 0;
                else if (c > nocb_nobypass_lim_per_jiffy)
                        c = nocb_nobypass_lim_per_jiffy;
        }
        WRITE_ONCE(rdp->nocb_nobypass_count, c);


Your (Vlad's) approach OTOH is also fine by me: you check whether the page is
full and use that as a 'flood is happening' detector.

thanks,

 - Joel
Uladzislau Rezki June 13, 2022, 9:47 a.m. UTC | #9
Hello, Joel, Paul.

> Hi Vlad, Paul,
> 
> On Thu, Jun 09, 2022 at 03:10:57PM +0200, Uladzislau Rezki wrote:
> > On Tue, Jun 7, 2022 at 5:47 AM Paul E. McKenney <paulmck@kernel.org> wrote:
> > >
> > > On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > > > > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > > > > Currently the monitor work is scheduled with a fixed interval of
> > > > > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > > > > is low utilization of page slots in some scenarios. A page can store
> > > > > > up to 512 records. For example, on an Android system it can look like:
> > > > > >
> > > > > > <snip>
> > > > > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > > > > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > > > > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > > > > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > > > > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > > > > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > > > > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > > > > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > > > > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > > > > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > > > > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > > > > <snip>
> > > > > >
> > > > > > where a page carries only a few records of memory to be reclaimed. In
> > > > > > order to improve batching and make utilization more efficient, the patch
> > > > > > introduces a drain interval that can be set to either
> > > > > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > > > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > > > > more often, whereas in mostly idle cases the interval is set to its
> > > > > > maximum timeout, which improves the utilization of page slots.
> > > > > >
> > > > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > > > >
> > > > > That does look like a problem well worth solving!
> > > > >
> > > > Agree, better ideas make a better final solution :)
> > > >
> > > > >
> > > > > But I am missing one thing. If we are having a callback flood, why do we
> > > > > need a shorter timeout?
> > > > >
> > > > To offload faster, because otherwise we run into the classical issue: a
> > > > low-memory condition resulting in OOM.
> > >
> > > But doesn't each callback queued during the flood give us an opportunity
> > > to react to the flood?  That will be way more fine-grained than any
> > > reasonable timer, right?  Or am I missing something?
> > >
> > We can set the timer to zero, or to the current "jiffies", to initiate the
> > offloading if the page is full. In that sense it probably makes sense to
> > propagate those two attributes to user space, so that the user can
> > configure the min/max drain interval.
> > 
> > Or we can deal only with a fixed interval, exposed via sysfs so the user
> > can control it. In that case we can get rid of the MIN one and just
> > trigger the timer when the page is full. I think this approach is better.
> 
> Yes, I also think triggering the timer with a zero timeout is better. Can you
> (Vlad) accomplish that by just calling the timer callback inline, instead of
> queuing a timer? I imagine you would just do queue_work() instead of
> queue_delayed_work() in this scenario.
> 
> > > I do agree that the action would often need to be indirect to avoid the
> > > memory-allocation-state hassles, but we already can do that, either via
> > > an extremely short-term hrtimer or something like irq-work.
> > >
> > > > > Wouldn't a check on the number of blocks queued be simpler, more direct,
> > > > > and provide faster response to the start of a callback flood?
> > > > >
> > > > I rely on krcp->count because we cannot always store the pointer in the page
> > > > slots. We cannot allocate a page in the caller context, thus we use a
> > > > page-cache worker that fills the cache in normal context. While it populates
> > > > the cache, pointers are temporarily queued to the linked list.
> > > >
> > > > Any thoughts?
> > >
> > > There are a great many ways to approach this.  One of them is to maintain
> > > a per-CPU free-running counter of kvfree_rcu() calls, and to reset this
> > > counter each jiffy.
> > >
> > > Or am I missing a trick here?
> > >
> > Do you mean to have a per-CPU timer that checks the per-CPU freed counter
> > and schedules the work when it is needed? Or have I missed your point?
> 
> I think he (Paul) is describing how 'flood detection' can work, similar to how
> the bypass list code is implemented. There he maintains a count; only when it
> exceeds a limit will callbacks be queued on to the bypass list.
> 
OK, I see that. We also do a similar thing. We say it is a flood when a page
becomes full, so it is a kind of threshold that we pass.

> This code:
> 
>         // If we have advanced to a new jiffy, reset counts to allow
>         // moving back from ->nocb_bypass to ->cblist.
>         if (j == rdp->nocb_nobypass_last) {
>                 c = rdp->nocb_nobypass_count + 1;
>         } else {
>                 WRITE_ONCE(rdp->nocb_nobypass_last, j);
>                 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
>                 if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
>                                  nocb_nobypass_lim_per_jiffy))
>                         c = 0;
>                 else if (c > nocb_nobypass_lim_per_jiffy)
>                         c = nocb_nobypass_lim_per_jiffy;
>         }
>         WRITE_ONCE(rdp->nocb_nobypass_count, c);
> 
> 
> Your (Vlad's) approach OTOH is also fine by me: you check whether the page is
> full and use that as a 'flood is happening' detector.
> 
OK, thank you, Joel. I also think that this way we improve batching and
utilization of the page, which is actually the intention of the patch in
question.

--
Uladzislau Rezki
Uladzislau Rezki June 14, 2022, 6:42 a.m. UTC | #10
> Hello, Joel, Paul.
> 
> > Hi Vlad, Paul,
> > 
> > On Thu, Jun 09, 2022 at 03:10:57PM +0200, Uladzislau Rezki wrote:
> > > On Tue, Jun 7, 2022 at 5:47 AM Paul E. McKenney <paulmck@kernel.org> wrote:
> > > >
> > > > On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > > > > > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > > > > > Currently the monitor work is scheduled with a fixed interval of
> > > > > > > HZ/50, i.e. every 20 milliseconds. The drawback of such an approach
> > > > > > > is low utilization of page slots in some scenarios. A page can store
> > > > > > > up to 512 records. For example, on an Android system it can look like:
> > > > > > >
> > > > > > > <snip>
> > > > > > >   kworker/3:0-13872   [003] .... 11286.007048: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=1
> > > > > > >   kworker/3:0-13872   [003] .... 11286.015638: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > > > >   kworker/1:2-20434   [001] .... 11286.051230: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > > > >   kworker/1:2-20434   [001] .... 11286.059322: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=2
> > > > > > >   kworker/0:1-20052   [000] .... 11286.095295: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=2
> > > > > > >   kworker/0:1-20052   [000] .... 11286.103418: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=1
> > > > > > >   kworker/2:3-14372   [002] .... 11286.135155: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=2
> > > > > > >   kworker/2:3-14372   [002] .... 11286.135198: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000044872ffd nr_records=1
> > > > > > >   kworker/1:2-20434   [001] .... 11286.155377: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=5
> > > > > > >   kworker/2:3-14372   [002] .... 11286.167181: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000026522604 nr_records=5
> > > > > > >   kworker/1:2-20434   [001] .... 11286.179202: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x000000008ef95e14 nr_records=1
> > > > > > >   kworker/2:3-14372   [002] .... 11286.187398: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000c597d297 nr_records=6
> > > > > > >   kworker/3:0-13872   [003] .... 11286.187445: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000050bf92e2 nr_records=3
> > > > > > >   kworker/1:2-20434   [001] .... 11286.198975: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x00000000cbcf05db nr_records=4
> > > > > > >   kworker/1:2-20434   [001] .... 11286.207203: rcu_invoke_kfree_bulk_callback: rcu_preempt bulk=0x0000000095ed6fca nr_records=4
> > > > > > > <snip>
> > > > > > >
> > > > > > > where a page carries only a few records of memory to be reclaimed. In
> > > > > > > order to improve batching and make utilization more efficient, the patch
> > > > > > > introduces a drain interval that can be set to either
> > > > > > > KFREE_DRAIN_JIFFIES_MAX or KFREE_DRAIN_JIFFIES_MIN. The interval is
> > > > > > > adjusted when a flood is detected: in that case memory reclaim occurs
> > > > > > > more often, whereas in mostly idle cases the interval is set to its
> > > > > > > maximum timeout, which improves the utilization of page slots.
> > > > > > >
> > > > > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > > > > >
> > > > > > That does look like a problem well worth solving!
> > > > > >
> > > > > Agree, better ideas make a better final solution :)
> > > > >
> > > > > >
> > > > > > But I am missing one thing. If we are having a callback flood, why do we
> > > > > > need a shorter timeout?
> > > > > >
> > > > > To offload faster, because otherwise we run into the classical issue: a
> > > > > low-memory condition resulting in OOM.
> > > >
> > > > But doesn't each callback queued during the flood give us an opportunity
> > > > to react to the flood?  That will be way more fine-grained than any
> > > > reasonable timer, right?  Or am I missing something?
> > > >
> > > We can set the timer to zero, or to the current "jiffies", to initiate the
> > > offloading if the page is full. In that sense it probably makes sense to
> > > propagate those two attributes to user space, so that the user can
> > > configure the min/max drain interval.
> > > 
> > > Or we can deal only with a fixed interval, exposed via sysfs so the user
> > > can control it. In that case we can get rid of the MIN one and just
> > > trigger the timer when the page is full. I think this approach is better.
> > 
> > Yes, I also think triggering the timer with a zero timeout is better. Can you
> > (Vlad) accomplish that by just calling the timer callback inline, instead of
> > queuing a timer? I imagine you would just do queue_work() instead of
> > queue_delayed_work() in this scenario.
> > 
> > > > I do agree that the action would often need to be indirect to avoid the
> > > > memory-allocation-state hassles, but we already can do that, either via
> > > > an extremely short-term hrtimer or something like irq-work.
> > > >
> > > > > > Wouldn't a check on the number of blocks queued be simpler, more direct,
> > > > > > and provide faster response to the start of a callback flood?
> > > > > >
> > > > > I rely on krcp->count because we cannot always store the pointer in the page
> > > > > slots. We cannot allocate a page in the caller context, thus we use a
> > > > > page-cache worker that fills the cache in normal context. While it populates
> > > > > the cache, pointers are temporarily queued to the linked list.
> > > > >
> > > > > Any thoughts?
> > > >
> > > > There are a great many ways to approach this.  One of them is to maintain
> > > > a per-CPU free-running counter of kvfree_rcu() calls, and to reset this
> > > > counter each jiffy.
> > > >
> > > > Or am I missing a trick here?
> > > >
> > > Do you mean to have a per-CPU timer that checks the per-CPU freed counter
> > > and schedules the work when it is needed? Or have I missed your point?
> > 
> > I think he (Paul) is describing how 'flood detection' can work, similar to how
> > the bypass list code is implemented. There he maintains a count; only when it
> > exceeds a limit will callbacks be queued on to the bypass list.
> > 
> OK, I see that. We also do a similar thing. We say it is a flood when a page
> becomes full, so it is a kind of threshold that we pass.
> 
> > This code:
> > 
> >         // If we have advanced to a new jiffy, reset counts to allow
> >         // moving back from ->nocb_bypass to ->cblist.
> >         if (j == rdp->nocb_nobypass_last) {
> >                 c = rdp->nocb_nobypass_count + 1;
> >         } else {
> >                 WRITE_ONCE(rdp->nocb_nobypass_last, j);
> >                 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
> >                 if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
> >                                  nocb_nobypass_lim_per_jiffy))
> >                         c = 0;
> >                 else if (c > nocb_nobypass_lim_per_jiffy)
> >                         c = nocb_nobypass_lim_per_jiffy;
> >         }
> >         WRITE_ONCE(rdp->nocb_nobypass_count, c);
> > 
> > 
> > Your (Vlad's) approach OTOH is also fine by me: you check whether the page is
> > full and use that as a 'flood is happening' detector.
> > 
> OK, thank you, Joel. I also think that this way we improve batching and
> utilization of the page, which is actually the intention of the patch in
> question.
> 
Paul, will you pick this patch?

Thanks!

--
Uladzislau Rezki
Paul E. McKenney June 15, 2022, 5:12 a.m. UTC | #11
On Tue, Jun 14, 2022 at 08:42:00AM +0200, Uladzislau Rezki wrote:
> > Hello, Joel, Paul.
> > 
> > > Hi Vlad, Paul,
> > > 
> > > On Thu, Jun 09, 2022 at 03:10:57PM +0200, Uladzislau Rezki wrote:
> > > > On Tue, Jun 7, 2022 at 5:47 AM Paul E. McKenney <paulmck@kernel.org> wrote:
> > > > >
> > > > > On Sun, Jun 05, 2022 at 11:10:31AM +0200, Uladzislau Rezki wrote:
> > > > > > > On Thu, Jun 02, 2022 at 10:06:44AM +0200, Uladzislau Rezki (Sony) wrote:
> > > > > > > > > [... patch changelog and trace trimmed, see the start of this thread ...]
> > > > > > >
> > > > > > > That does look like a problem well worth solving!
> > > > > > >
> > > > > > Agree, better ideas make better final solution :)
> > > > > >
> > > > > > >
> > > > > > > But I am missing one thing. If we are having a callback flood, why do we
> > > > > > > need a shorter timeout?
> > > > > > >
> > > > > > > To offload faster, because otherwise we run into the classical issue: a
> > > > > > > low-memory condition resulting in OOM.
> > > > >
> > > > > But doesn't each callback queued during the flood give us an opportunity
> > > > > to react to the flood?  That will be way more fine-grained than any
> > > > > reasonable timer, right?  Or am I missing something?
> > > > >
> > > > > We can set the timer to zero or to the current "jiffies" to initiate the
> > > > > offloading if the page is full. In that sense it probably makes sense to
> > > > > propagate those two attributes to user space, so the user can configure
> > > > > the min/max drain interval.
> > > > > 
> > > > > Or we can deal only with a fixed interval exposed via sysfs so the user can
> > > > > control it. In that case we can get rid of the MIN one and just trigger a
> > > > > timer when the page is full. I think this approach is better.
> > [... rest of the earlier exchange trimmed, see the message above ...]
> 
> Paul, will you pick this patch?

I did pick up the first one:

16224f4cdf03 ("rcu/kvfree: Remove useless monitor_todo flag")

On the second one, if you use page-fill as your flood detector, can't
you simplify things by just using the one longer timeout, as discussed
in this thread?

Or did I miss a turn somewhere?

							Thanx, Paul

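
As a reference point, Paul's earlier suggestion of a per-CPU free-running
counter of kvfree_rcu() calls that is reset each jiffy could be sketched as
follows, loosely modeled on the nocb_nobypass code quoted above. The
monitor_last_jiffy and monitor_count fields and the per-jiffy limit are
hypothetical, not existing members of struct kfree_rcu_cpu, and the caller is
assumed to hold krcp->lock:

<snip>
#define KVFREE_FLOOD_LIM_PER_JIFFY 16

static bool kvfree_flood_detected(struct kfree_rcu_cpu *krcp)
{
	unsigned long j = jiffies;

	if (krcp->monitor_last_jiffy != j) {
		/* A new jiffy has begun: restart the call counter. */
		krcp->monitor_last_jiffy = j;
		krcp->monitor_count = 0;
	}

	/* Too many calls within a single jiffy indicate a flood. */
	return ++krcp->monitor_count > KVFREE_FLOOD_LIM_PER_JIFFY;
}
<snip>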
Uladzislau Rezki June 15, 2022, 7:32 a.m. UTC | #12
> On Tue, Jun 14, 2022 at 08:42:00AM +0200, Uladzislau Rezki wrote:
> > [... full quote of the earlier exchange trimmed, see the message above ...]
> > Paul, will you pick this patch?
> 
> I did pick up the first one:
> 
> 16224f4cdf03 ("rcu/kvfree: Remove useless monitor_todo flag")
> 
> On the second one, if you use page-fill as your flood detector, can't
> you simplify things by just using the one longer timeout, as discussed
> in this thread?
> 
> Or did I miss a turn somewhere?
> 
No, you did not :) Agreed, I will simplify it to one interval corresponding
to 1 HZ. The flood is detected when a page is full. When that occurs the
work will be rearmed to run ASAP. Will resend it.

One thing that we have discussed: in case of a flood we can minimize the
memory footprint by releasing the page directly from our monitor work if
the grace period has passed for all of the page slots. In that case we
do not need to move the page forward to the RCU core for later reclaim.

But that is for separate patches. I will examine it when I return from
Norway.

--
Uladzislau Rezki
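
Roughly, the simplification agreed on above could reduce the patch's helper
to something like the following sketch: a single drain interval of 1 * HZ,
with the monitor work pulled in to run as soon as possible once a page
becomes full. This is an untested illustration based on the helper in the
patch below, not the resent version itself:

<snip>
/* One interval only; a full page rearms the work immediately. */
#define KFREE_DRAIN_JIFFIES (HZ)

static void
schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
{
	long delay = KFREE_DRAIN_JIFFIES;

	/* A full page is the flood signal: drain without delay. */
	if (READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR)
		delay = 0;

	if (delayed_work_pending(&krcp->monitor_work)) {
		/* Only pull an already-armed timer in, never push it out. */
		if (delay < (long)(krcp->monitor_work.timer.expires - jiffies))
			mod_delayed_work(system_wq, &krcp->monitor_work, delay);

		return;
	}

	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
}
<snip>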
Paul E. McKenney June 15, 2022, 2:02 p.m. UTC | #13
On Wed, Jun 15, 2022 at 09:32:32AM +0200, Uladzislau Rezki wrote:
> [... full quote of the earlier exchange trimmed, see the message above ...]
> No, you did not :) Agreed, I will simplify it to one interval corresponding
> to 1 HZ. The flood is detected when a page is full. When that occurs the
> work will be rearmed to run ASAP. Will resend it.

Very good, and looking forward to it!

> One thing that we have discussed: in case of a flood we can minimize the
> memory footprint by releasing the page directly from our monitor work if
> the grace period has passed for all of the page slots. In that case we
> do not need to move the page forward to the RCU core for later reclaim.
> 
> But that is for separate patches. I will examine it when I return from
> Norway.

Agreed, those should be separate patches.

							Thanx, Paul
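
As a hedged sketch of the follow-up idea mentioned above (freeing a page
directly from the monitor work once a grace period has already elapsed for
all of its slots), one could imagine leaning on the polled grace-period API.
The ->gp_snap field and the kvfree_bulk_page() helper are hypothetical here;
->gp_snap would be recorded with get_state_synchronize_rcu() at the time the
page filled up:

<snip>
static bool kvfree_try_direct_reclaim(struct kvfree_rcu_bulk_data *bnode)
{
	/* Has a full grace period already elapsed for this page? */
	if (!poll_state_synchronize_rcu(bnode->gp_snap))
		return false;

	/*
	 * Yes: every slot is safe to free right away, so there is no
	 * need to hand the page to the RCU core for later reclaim.
	 */
	kvfree_bulk_page(bnode);
	return true;
}
<snip>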
diff mbox series

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index fd16c0b46d9e..c02a64995b85 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3249,7 +3249,8 @@  EXPORT_SYMBOL_GPL(call_rcu);
 
 
 /* Maximum number of jiffies to wait before draining a batch. */
-#define KFREE_DRAIN_JIFFIES (HZ / 50)
+#define KFREE_DRAIN_JIFFIES_MAX (HZ)
+#define KFREE_DRAIN_JIFFIES_MIN (HZ / 50)
 #define KFREE_N_BATCHES 2
 #define FREE_N_CHANNELS 2
 
@@ -3510,6 +3511,26 @@  need_offload_krc(struct kfree_rcu_cpu *krcp)
 	return !!krcp->head;
 }
 
+static void
+schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+{
+	long delay, delay_left;
+
+	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ?
+		KFREE_DRAIN_JIFFIES_MIN : KFREE_DRAIN_JIFFIES_MAX;
+
+	if (delayed_work_pending(&krcp->monitor_work)) {
+		delay_left = krcp->monitor_work.timer.expires - jiffies;
+
+		if (delay < delay_left)
+			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
+
+		return;
+	}
+
+	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
+}
+
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
  */
@@ -3567,7 +3588,7 @@  static void kfree_rcu_monitor(struct work_struct *work)
 	// work to repeat an attempt. Because previous batches are
 	// still in progress.
 	if (need_offload_krc(krcp))
-		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+		schedule_delayed_monitor_work(krcp);
 
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
@@ -3755,7 +3776,7 @@  void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
-		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+		schedule_delayed_monitor_work(krcp);
 
 unlock_return:
 	krc_this_cpu_unlock(krcp, flags);
@@ -3831,7 +3852,7 @@  void __init kfree_rcu_scheduler_running(void)
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (need_offload_krc(krcp))
-			schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
+			schedule_delayed_monitor_work(krcp);
 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }