[v2,1/3] mm/page_alloc: Don't pass pfn to free_unref_page_commit()

Message ID 20211103170512.2745765-2-nsaenzju@redhat.com (mailing list archive)
State New
Series mm/page_alloc: Remote per-cpu page list drain support

Commit Message

Nicolas Saenz Julienne Nov. 3, 2021, 5:05 p.m. UTC
free_unref_page_commit() doesn't make use of its pfn argument, so get
rid of it.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
---
 mm/page_alloc.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

Comments

Vlastimil Babka Nov. 23, 2021, 2:41 p.m. UTC | #1
On 11/3/21 18:05, Nicolas Saenz Julienne wrote:
> free_unref_page_commit() doesn't make use of its pfn argument, so get
> rid of it.

Yeah, looks like since df1acc856923 ("mm/page_alloc: avoid conflating IRQs
disabled with zone->lock")
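
(For context, a rough sketch of what the helper body looks like once that commit moved the isolated-page handling, the only user of pfn, out to its callers; this is abridged and reconstructed from the v5.15-era tree rather than quoted, so details may differ slightly, but the point stands that nothing left in it ever reads pfn:)

	static void free_unref_page_commit(struct page *page, unsigned long pfn,
					   int migratetype, unsigned int order)
	{
		struct zone *zone = page_zone(page);	/* zone derived from the page, not the pfn */
		struct per_cpu_pages *pcp;
		int high;

		__count_vm_events(PGFREE, 1 << order);
		pcp = this_cpu_ptr(zone->per_cpu_pageset);
		list_add(&page->lru, &pcp->lists[order_to_pindex(migratetype, order)]);
		pcp->count += 1 << order;
		high = nr_pcp_high(pcp, zone);
		if (pcp->count >= high) {
			/* over the high watermark: return a batch to the buddy allocator */
			free_pcppages_bulk(zone, nr_pcp_free(pcp, high, READ_ONCE(pcp->batch)),
					   pcp);
		}
		/* pfn is never used above; the free_one_page() path that needed it
		 * now runs in the callers, so the parameter can simply go. */
	}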

> Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5952749ad40..9ef03dfb8f95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3355,8 +3355,8 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
 	return min(READ_ONCE(pcp->batch) << 2, high);
 }
 
-static void free_unref_page_commit(struct page *page, unsigned long pfn,
-				   int migratetype, unsigned int order)
+static void free_unref_page_commit(struct page *page, int migratetype,
+				   unsigned int order)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -3405,7 +3405,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	}
 
 	local_lock_irqsave(&pagesets.lock, flags);
-	free_unref_page_commit(page, pfn, migratetype, order);
+	free_unref_page_commit(page, migratetype, order);
 	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 
@@ -3415,13 +3415,13 @@ void free_unref_page(struct page *page, unsigned int order)
 void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
-	unsigned long flags, pfn;
+	unsigned long flags;
 	int batch_count = 0;
 	int migratetype;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
-		pfn = page_to_pfn(page);
+		unsigned long pfn = page_to_pfn(page);
 		if (!free_unref_page_prepare(page, pfn, 0)) {
 			list_del(&page->lru);
 			continue;
@@ -3437,15 +3437,10 @@ void free_unref_page_list(struct list_head *list)
 			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
 			continue;
 		}
-
-		set_page_private(page, pfn);
 	}
 
 	local_lock_irqsave(&pagesets.lock, flags);
 	list_for_each_entry_safe(page, next, list, lru) {
-		pfn = page_private(page);
-		set_page_private(page, 0);
-
 		/*
 		 * Non-isolated types over MIGRATE_PCPTYPES get added
 		 * to the MIGRATE_MOVABLE pcp list.
@@ -3455,7 +3450,7 @@ void free_unref_page_list(struct list_head *list)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(page);
-		free_unref_page_commit(page, pfn, migratetype, 0);
+		free_unref_page_commit(page, migratetype, 0);
 
 		/*
 		 * Guard against excessive IRQ disabled times when we get