[v7,1/3] mm/vmstat: Use per cpu variable to track a vmstat discrepancy

Message ID 20220817191524.140710201@redhat.com
State New
Series tick/sched: Ensure quiet_vmstat() is called when the idle tick was stopped too

Commit Message

Marcelo Tosatti Aug. 17, 2022, 7:13 p.m. UTC
From: Aaron Tomlin <atomlin@redhat.com>

Add a per-CPU variable, vmstat_dirty, to indicate whether a vmstat
imbalance is present for a given CPU. This lets us fold all the
remaining differentials at the appropriate time.

This speeds up quiet_vmstat() when no per-CPU differentials exist.

Based on 
https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/

Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

---
 mm/vmstat.c |   54 ++++++++++++++++++++----------------------------------
 1 file changed, 20 insertions(+), 34 deletions(-)
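
In outline, the flag is wired into three kinds of sites; a condensed map
of the diff below (names as used by this patch):

	/* writers: the touched update paths mark the CPU dirty */
	__mod_zone_page_state(), __mod_node_page_state(),
	mod_zone_state(), mod_node_state()
		-> mark_vmstat_dirty();

	/* fold: refresh_cpu_vm_stats() clears the flag before folding,
	 * so a concurrent update re-marks the CPU rather than going
	 * unnoticed */
	this_cpu_write(vmstat_dirty, false);

	/* readers: a single flag test replaces the need_update() scan */
	quiet_vmstat():    if (!__this_cpu_read(vmstat_dirty)) return;
	vmstat_shepherd(): queue work only if per_cpu(vmstat_dirty, cpu)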

Comments

Andrew Morton Aug. 24, 2022, 8:20 p.m. UTC | #1
On Wed, 17 Aug 2022 16:13:47 -0300 Marcelo Tosatti <mtosatti@redhat.com> wrote:

> From: Aaron Tomlin <atomlin@redhat.com>
> 
> Add a per-CPU variable, vmstat_dirty, to indicate whether a vmstat
> imbalance is present for a given CPU. This lets us fold all the
> remaining differentials at the appropriate time.
> 
> This speeds up quiet_vmstat() when no per-CPU differentials exist.
> 
> Based on 
> https://lore.kernel.org/lkml/20220204173554.763888172@fedora.localdomain/
> 
> Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> 
> ---
>  mm/vmstat.c |   54 ++++++++++++++++++++----------------------------------
>  1 file changed, 20 insertions(+), 34 deletions(-)
> 
> Index: linux-2.6/mm/vmstat.c
> ===================================================================
> --- linux-2.6.orig/mm/vmstat.c
> +++ linux-2.6/mm/vmstat.c
> @@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
>  #endif
>  
>  #ifdef CONFIG_SMP
> +static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
> +
> +static inline void mark_vmstat_dirty(void)
> +{
> +	this_cpu_write(vmstat_dirty, true);
> +}

If we're to have a helper for this then how about helpers for clearing
it and reading it?

Also, vmstat_mark_dirty(), vmstat_clear_dirty() and vmstat_dirty()
would be better identifiers.

Then those helper functions become good sites for comments explaining
what's going on.
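
For concreteness, a minimal sketch of the suggested helper set (not
taken from the posted patch; a reader literally named vmstat_dirty()
would collide with the per-CPU variable of the same name, so the sketch
uses a hypothetical is_vmstat_dirty() instead):

static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);

/*
 * Mark the current CPU as having percpu vmstat differentials that
 * still need to be folded into the global counters.
 */
static inline void vmstat_mark_dirty(void)
{
	this_cpu_write(vmstat_dirty, true);
}

/*
 * Clear the flag, e.g. just before folding the percpu counters, so
 * that a concurrent counter update re-marks the CPU as dirty instead
 * of being lost.
 */
static inline void vmstat_clear_dirty(void)
{
	this_cpu_write(vmstat_dirty, false);
}

/* Does the current CPU have differentials that still need folding? */
static inline bool is_vmstat_dirty(void)
{
	return this_cpu_read(vmstat_dirty);
}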
Aaron Tomlin Aug. 26, 2022, 1:29 p.m. UTC | #2
On Wed 2022-08-24 13:20 -0700, Andrew Morton wrote:
> > --- linux-2.6.orig/mm/vmstat.c
> > +++ linux-2.6/mm/vmstat.c
> > @@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
> >  #endif
> >  
> >  #ifdef CONFIG_SMP
> > +static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
> > +
> > +static inline void mark_vmstat_dirty(void)
> > +{
> > +	this_cpu_write(vmstat_dirty, true);
> > +}
> 
> If we're to have a helper for this then how about helpers for clearing
> it and reading it?
> 
> Also, vmstat_mark_dirty(), vmstat_clear_dirty() and vmstat_dirty()
> would be better identifiers.
> 
> Then those helper functions become good sites for comments explaining
> what's going on.
> 

Hi Andrew,

Fair enough. I'll work on that.


Kind regards,

Patch

Index: linux-2.6/mm/vmstat.c
===================================================================
--- linux-2.6.orig/mm/vmstat.c
+++ linux-2.6/mm/vmstat.c
@@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
 #endif
 
 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
+
+static inline void mark_vmstat_dirty(void)
+{
+	this_cpu_write(vmstat_dirty, true);
+}
 
 int calculate_pressure_threshold(struct zone *zone)
 {
@@ -367,6 +373,7 @@ void __mod_zone_page_state(struct zone *
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	mark_vmstat_dirty();
 
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
@@ -405,6 +412,7 @@ void __mod_node_page_state(struct pglist
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	mark_vmstat_dirty();
 
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
@@ -603,6 +611,7 @@ static inline void mod_zone_state(struct
 
 	if (z)
 		zone_page_state_add(z, zone, item);
+	mark_vmstat_dirty();
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -671,6 +680,7 @@ static inline void mod_node_state(struct
 
 	if (z)
 		node_page_state_add(z, pgdat, item);
+	mark_vmstat_dirty();
 }
 
 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
@@ -825,6 +835,14 @@ static int refresh_cpu_vm_stats(bool do_
 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
 	int changes = 0;
 
+	/*
+	 * Clear vmstat_dirty before clearing the percpu vmstats.
+	 * If interrupts are enabled, it is possible that an interrupt
+	 * or another task modifies a percpu vmstat, which will
+	 * set vmstat_dirty to true.
+	 */
+	this_cpu_write(vmstat_dirty, false);
+
 	for_each_populated_zone(zone) {
 		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
 #ifdef CONFIG_NUMA
@@ -1949,35 +1967,6 @@ static void vmstat_update(struct work_st
 }
 
 /*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
-	pg_data_t *last_pgdat = NULL;
-	struct zone *zone;
-
-	for_each_populated_zone(zone) {
-		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
-		struct per_cpu_nodestat *n;
-
-		/*
-		 * The fast way of checking if there are any vmstat diffs.
-		 */
-		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
-			return true;
-
-		if (last_pgdat == zone->zone_pgdat)
-			continue;
-		last_pgdat = zone->zone_pgdat;
-		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
-		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
-			return true;
-	}
-	return false;
-}
-
-/*
  * Switch off vmstat processing and then fold all the remaining differentials
  * until the diffs stay at zero. The function is used by NOHZ and can only be
  * invoked when tick processing is not active.
@@ -1987,10 +1976,7 @@ void quiet_vmstat(void)
 	if (system_state != SYSTEM_RUNNING)
 		return;
 
-	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
-		return;
-
-	if (!need_update(smp_processor_id()))
+	if (!__this_cpu_read(vmstat_dirty))
 		return;
 
 	/*
@@ -2021,7 +2007,7 @@ static void vmstat_shepherd(struct work_
 	for_each_online_cpu(cpu) {
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
-		if (!delayed_work_pending(dw) && need_update(cpu))
+		if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 
 		cond_resched();
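
A note on ordering in refresh_cpu_vm_stats() above: vmstat_dirty is
cleared before the percpu counters are read and folded. An illustrative
interleaving of the race the in-code comment describes (annotation only,
not part of the patch):

	/* refresh_cpu_vm_stats(), running with interrupts enabled */
	this_cpu_write(vmstat_dirty, false);	/* clear first */
	/* ... read and fold vm_stat_diff[] / vm_node_stat_diff[] ... */

	/* An interrupt anywhere in the fold can call e.g.
	 * __mod_zone_page_state(), creating a new differential and
	 * re-marking vmstat_dirty, so quiet_vmstat() and
	 * vmstat_shepherd() will still notice this CPU. */

If the flag were cleared after the fold instead, a mark set by such an
interrupt would be erased, leaving the CPU flagged clean while it still
holds an unfolded differential.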