[05/11] mm/page_alloc: Batch the accounting updates in the bulk allocator

Message ID 20210414133931.4555-6-mgorman@techsingularity.net (mailing list archive)
State New
Series Use local_lock for pcp protection and reduce stat overhead

Commit Message

Mel Gorman April 14, 2021, 1:39 p.m. UTC
Now that the zone statistics are simple counters that do not require
special protection, the accounting updates in the bulk allocator can
be batched without the complexity of protected RMW updates or xchg().

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 include/linux/vmstat.h |  8 ++++++++
 mm/page_alloc.c        | 30 +++++++++++++-----------------
 2 files changed, 21 insertions(+), 17 deletions(-)
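
To make the shape of the change concrete, here is a minimal userspace
sketch. It is not kernel code: stat_hit stands in for one of the simple
counters, and alloc_bulk_old()/alloc_bulk_new() are made-up names that
contrast per-page accounting with the batched nr_account pattern that
__alloc_pages_bulk() adopts in the patch below.

#include <stdio.h>

static long stat_hit;	/* stands in for a vm_numa_event counter */

/* Old scheme: one update of the shared counter per allocated page. */
static void alloc_bulk_old(int nr_pages)
{
	for (int i = 0; i < nr_pages; i++)
		stat_hit++;
}

/* New scheme: count locally, then fold in a single batched update. */
static void alloc_bulk_new(int nr_pages)
{
	long nr_account = 0;

	for (int i = 0; i < nr_pages; i++)
		nr_account++;
	stat_hit += nr_account;	/* one add instead of nr_pages of them */
}

int main(void)
{
	alloc_bulk_old(128);
	alloc_bulk_new(128);
	printf("stat_hit = %ld\n", stat_hit);	/* 256: totals are identical */
	return 0;
}

In the kernel case the single batched update still happens with IRQs
disabled under pagesets.lock, so the totals match what the per-page
version produced.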

Comments

Vlastimil Babka April 14, 2021, 4:31 p.m. UTC | #1
On 4/14/21 3:39 PM, Mel Gorman wrote:
> Now that the zone statistics are simple counters that do not require
> special protection, the accounting updates in the bulk allocator can
> be batched without the complexity of protected RMW updates or xchg().
> 
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index dde4dec4e7dd..8473b8fa9756 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -246,6 +246,14 @@ __count_numa_event(struct zone *zone, enum numa_stat_item item)
 	raw_cpu_inc(pzstats->vm_numa_event[item]);
 }
 
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+	raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
 extern void __count_numa_event(struct zone *zone, enum numa_stat_item item);
 extern unsigned long sum_zone_node_page_state(int node,
 					      enum zone_stat_item item);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d0f047647e3..cff0f1c98b28 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3411,7 +3411,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
  *
  * Must be called with interrupts disabled.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+				   long nr_account)
 {
 #ifdef CONFIG_NUMA
 	enum numa_stat_item local_stat = NUMA_LOCAL;
@@ -3424,12 +3425,12 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 		local_stat = NUMA_OTHER;
 
 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
-		__count_numa_event(z, NUMA_HIT);
+		__count_numa_events(z, NUMA_HIT, nr_account);
 	else {
-		__count_numa_event(z, NUMA_MISS);
-		__count_numa_event(preferred_zone, NUMA_FOREIGN);
+		__count_numa_events(z, NUMA_MISS, nr_account);
+		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
 	}
-	__count_numa_event(z, local_stat);
+	__count_numa_events(z, local_stat, nr_account);
 #endif
 }
 
@@ -3475,7 +3476,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
-		zone_statistics(preferred_zone, zone);
+		zone_statistics(preferred_zone, zone, 1);
 	}
 	local_unlock_irqrestore(&pagesets.lock, flags);
 	return page;
@@ -3536,7 +3537,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 				  get_pcppage_migratetype(page));
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-	zone_statistics(preferred_zone, zone);
+	zone_statistics(preferred_zone, zone, 1);
 	local_irq_restore(flags);
 
 out:
@@ -5019,7 +5020,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	struct alloc_context ac;
 	gfp_t alloc_gfp;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
-	int nr_populated = 0;
+	int nr_populated = 0, nr_account = 0;
 
 	if (unlikely(nr_pages <= 0))
 		return 0;
@@ -5092,15 +5093,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 				goto failed_irq;
 			break;
 		}
-
-		/*
-		 * Ideally this would be batched but the best way to do
-		 * that cheaply is to first convert zone_statistics to
-		 * be inaccurate per-cpu counter like vm_events to avoid
-		 * a RMW cycle then do the accounting with IRQs enabled.
-		 */
-		__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
-		zone_statistics(ac.preferred_zoneref->zone, zone);
+		nr_account++;
 
 		prep_new_page(page, 0, gfp, 0);
 		if (page_list)
@@ -5110,6 +5103,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
+	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
+	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+
 	local_unlock_irqrestore(&pagesets.lock, flags);
 
 	return nr_populated;
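
What makes the single raw_cpu_add() in __count_numa_events() sufficient
is the counter model the earlier patches in this series switched to:
each CPU only updates its own slot, and readers sum the slots while
tolerating a momentarily stale total. The following userspace model is
only a sketch of that idea; the array, NR_CPUS_MODEL and the helper
names are illustrative, not kernel APIs.

#include <stdio.h>

#define NR_CPUS_MODEL 4

/* One slot per "CPU"; only its owner ever writes it. */
static long vm_numa_event[NR_CPUS_MODEL];

/* Analogue of __count_numa_events(): a batched add to this CPU's slot. */
static void count_numa_events(int cpu, long delta)
{
	vm_numa_event[cpu] += delta;	/* no lock or protected RMW needed */
}

/* Analogue of folding the per-CPU deltas into a global total. */
static long sum_numa_events(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		sum += vm_numa_event[cpu];
	return sum;
}

int main(void)
{
	count_numa_events(0, 128);	/* a bulk allocation of 128 pages */
	count_numa_events(2, 32);	/* another CPU's bulk allocation */
	printf("NUMA_HIT = %ld\n", sum_numa_events());	/* 160 */
	return 0;
}

Because no slot is shared between writers, the add needs neither a lock
nor an atomic RMW, which is why zone_statistics() can now take a whole
batch as one delta.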