diff mbox series

[RFC,4/5] mm, page_alloc: cache pageset high and batch in struct zone

Message ID 20200907163628.26495-5-vbabka@suse.cz
State New
Headers show
Series disable pcplists during page isolation | expand

Commit Message

Vlastimil Babka Sept. 7, 2020, 4:36 p.m. UTC
All per-cpu pagesets for a zone use the same high and batch values, that are
duplicated there just for performance (locality) reasons. This patch adds the
same variables also to struct zone as 'central' ones.

This will be useful later for making it possible to disable pcplists temporarily
by setting the high value to 0, while remembering the values for restoring them
later. But we can also immediately benefit from not updating pagesets of all
possible cpus in case the newly recalculated values (after sysctl change or
memory online/offline) are actually unchanged from the previous ones.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/mmzone.h |  2 ++
 mm/page_alloc.c        | 18 +++++++++++++-----
 2 files changed, 15 insertions(+), 5 deletions(-)

Comments

Oscar Salvador Sept. 10, 2020, 11:30 a.m. UTC | #1
On Mon, Sep 07, 2020 at 06:36:27PM +0200, Vlastimil Babka wrote:
   */
> -static void setup_pageset(struct per_cpu_pageset *p);
> +static void pageset_init(struct per_cpu_pageset *p);

this belongs to the respective patches

> -static void zone_set_pageset_high_and_batch(struct zone *zone)
> +static void zone_set_pageset_high_and_batch(struct zone *zone, bool force_update)
>  {
>  	unsigned long new_high;
>  	unsigned long new_batch;
> @@ -6256,6 +6256,14 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
>  		new_batch = max(1UL, 1 * new_batch);
>  	}
>  
> +	if (zone->pageset_high != new_high ||
> +	    zone->pageset_batch != new_batch) {
> +		zone->pageset_high = new_high;
> +		zone->pageset_batch = new_batch;
> +	} else if (!force_update) {
> +		return;
> +	}

I am probably missing something obvious, so sorry, but why do we need
force_update here?
AFAICS, we only want to call pageset_update() in case zone->pageset_high/batch
and the new computed high/batch differs, so if everything is equal, why do we want
to call it anyways?
Vlastimil Babka Sept. 18, 2020, 12:02 p.m. UTC | #2
On 9/10/20 1:30 PM, Oscar Salvador wrote:
> On Mon, Sep 07, 2020 at 06:36:27PM +0200, Vlastimil Babka wrote:
>    */
>> -static void setup_pageset(struct per_cpu_pageset *p);
>> +static void pageset_init(struct per_cpu_pageset *p);
> 
> this belongs to the respective patches

Right, thanks.

>> -static void zone_set_pageset_high_and_batch(struct zone *zone)
>> +static void zone_set_pageset_high_and_batch(struct zone *zone, bool force_update)
>>  {
>>  	unsigned long new_high;
>>  	unsigned long new_batch;
>> @@ -6256,6 +6256,14 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
>>  		new_batch = max(1UL, 1 * new_batch);
>>  	}
>>  
>> +	if (zone->pageset_high != new_high ||
>> +	    zone->pageset_batch != new_batch) {
>> +		zone->pageset_high = new_high;
>> +		zone->pageset_batch = new_batch;
>> +	} else if (!force_update) {
>> +		return;
>> +	}
> 
> I am probably missing something obvious, so sorry, but why do we need
> force_update here?
> AFAICS, we only want to call pageset_update() in case zone->pageset_high/batch
> and the new computed high/batch differs, so if everything is equal, why do we want
> to call it anyways?

My reasoning is that initially we don't have guarantee that
zone->pageset_high/batch matches the respective pcp->high/batch. So we could
detect no change in the zone values and return, but leave the pcp value
incoherent. But now I think it could be achieved also in a simpler way, so I'll try.
diff mbox series

Patch

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8379432f4f2f..15582ca368b9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,8 @@  struct zone {
 #endif
 	struct pglist_data	*zone_pgdat;
 	struct per_cpu_pageset __percpu *pageset;
+	int pageset_high;
+	int pageset_batch;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a0cab2c6055e..004350a2b6ca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5834,7 +5834,7 @@  static void build_zonelists(pg_data_t *pgdat)
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static void setup_pageset(struct per_cpu_pageset *p);
+static void pageset_init(struct per_cpu_pageset *p);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 
@@ -6237,7 +6237,7 @@  static void pageset_init(struct per_cpu_pageset *p)
 	pcp->batch  = 1;
 }
 
-static void zone_set_pageset_high_and_batch(struct zone *zone)
+static void zone_set_pageset_high_and_batch(struct zone *zone, bool force_update)
 {
 	unsigned long new_high;
 	unsigned long new_batch;
@@ -6256,6 +6256,14 @@  static void zone_set_pageset_high_and_batch(struct zone *zone)
 		new_batch = max(1UL, 1 * new_batch);
 	}
 
+	if (zone->pageset_high != new_high ||
+	    zone->pageset_batch != new_batch) {
+		zone->pageset_high = new_high;
+		zone->pageset_batch = new_batch;
+	} else if (!force_update) {
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		p = per_cpu_ptr(zone->pageset, cpu);
 		pageset_update(&p->pcp, new_high, new_batch);
@@ -6273,7 +6281,7 @@  void __meminit setup_zone_pageset(struct zone *zone)
 		pageset_init(p);
 	}
 
-	zone_set_pageset_high_and_batch(zone);
+	zone_set_pageset_high_and_batch(zone, true);
 }
 
 /*
@@ -8038,7 +8046,7 @@  int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
 		goto out;
 
 	for_each_populated_zone(zone)
-		zone_set_pageset_high_and_batch(zone);
+		zone_set_pageset_high_and_batch(zone, false);
 out:
 	mutex_unlock(&pcp_batch_high_lock);
 	return ret;
@@ -8654,7 +8662,7 @@  EXPORT_SYMBOL(free_contig_range);
 void __meminit zone_pcp_update(struct zone *zone)
 {
 	mutex_lock(&pcp_batch_high_lock);
-	zone_set_pageset_high_and_batch(zone);
+	zone_set_pageset_high_and_batch(zone, false);
 	mutex_unlock(&pcp_batch_high_lock);
 }