
[-next] mm, page_alloc: avoid page_to_pfn() in move_freepages()

Message ID 20210323131215.934472-1-liushixin2@huawei.com
State New, archived
Series [-next] mm, page_alloc: avoid page_to_pfn() in move_freepages()

Commit Message

Liu Shixin March 23, 2021, 1:12 p.m. UTC
From: Kefeng Wang <wangkefeng.wang@huawei.com>

The start_pfn and end_pfn are already available in move_freepages_block(),
so there is no need to go back and forth between page and pfn in
move_freepages() and move_freepages_block(). Also, pfn_valid_within() should
validate the pfn before the page is touched.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/page_alloc.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)
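
For context, the cost of these conversions depends on the memory model: with
classic SPARSEMEM every page_to_pfn()/pfn_to_page() goes through a mem_section
lookup, while SPARSEMEM_VMEMMAP (and FLATMEM) reduce both to plain pointer
arithmetic. Abridged from include/asm-generic/memory_model.h of this era, for
reference:

	#if defined(CONFIG_SPARSEMEM_VMEMMAP)
	/* memmap is virtually contiguous: pure pointer arithmetic */
	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
	#elif defined(CONFIG_SPARSEMEM)
	/* classic sparsemem: each conversion does a section lookup */
	#define __page_to_pfn(pg)					\
	({	const struct page *__pg = (pg);				\
		int __sec = page_to_section(__pg);			\
		(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
	})
	#endif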

Comments

Matthew Wilcox March 23, 2021, 12:54 p.m. UTC | #1
On Tue, Mar 23, 2021 at 09:12:15PM +0800, Liu Shixin wrote:
> From: Kefeng Wang <wangkefeng.wang@huawei.com>
> 
> The start_pfn and end_pfn are already available in move_freepages_block(),
> so there is no need to go back and forth between page and pfn in
> move_freepages() and move_freepages_block(). Also, pfn_valid_within() should
> validate the pfn before the page is touched.

This looks good to me:

Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>

>  static int move_freepages(struct zone *zone,
> -			  struct page *start_page, struct page *end_page,
> +			  unsigned long start_pfn, unsigned long end_pfn,
>  			  int migratetype, int *num_movable)
>  {
>  	struct page *page;
> +	unsigned long pfn;
>  	unsigned int order;
>  	int pages_moved = 0;
>  
> -	for (page = start_page; page <= end_page;) {
> -		if (!pfn_valid_within(page_to_pfn(page))) {
> -			page++;
> +	for (pfn = start_pfn; pfn <= end_pfn;) {
> +		if (!pfn_valid_within(pfn)) {
> +			pfn++;
>  			continue;
>  		}
>  
> +		page = pfn_to_page(pfn);

I wonder if this wouldn't be even better if we did:

	struct page *start_page = pfn_to_page(start_pfn);

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		struct page *page = start_page + pfn - start_pfn;

		if (!pfn_valid_within(pfn))
			continue;

> -
> -			page++;
> +			pfn++;
>  			continue;

... then we can drop the increment of pfn here

>  		}
>  
> @@ -2458,7 +2459,7 @@ static int move_freepages(struct zone *zone,
>  
>  		order = buddy_order(page);
>  		move_to_free_list(page, zone, order, migratetype);
> -		page += 1 << order;
> +		pfn += 1 << order;

... and change this to pfn += (1 << order) - 1;

Do you have any numbers to quantify the benefit of this change?
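
Taken together, the loop Matthew sketches would look roughly like this (a
sketch assembled from the fragments above, not code posted in the thread;
debug assertions between the hunks are elided):

	static int move_freepages(struct zone *zone,
				  unsigned long start_pfn, unsigned long end_pfn,
				  int migratetype, int *num_movable)
	{
		struct page *start_page = pfn_to_page(start_pfn);
		unsigned long pfn;
		unsigned int order;
		int pages_moved = 0;

		for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
			/* one pfn_to_page() up front, then only arithmetic */
			struct page *page = start_page + pfn - start_pfn;

			if (!pfn_valid_within(pfn))
				continue;

			if (!PageBuddy(page)) {
				if (num_movable &&
				    (PageLRU(page) || __PageMovable(page)))
					(*num_movable)++;
				continue;
			}

			order = buddy_order(page);
			move_to_free_list(page, zone, order, migratetype);
			pfn += (1 << order) - 1; /* the loop's pfn++ adds the last step */
			pages_moved += 1 << order;
		}

		return pages_moved;
	}

The "- 1" is what lets the skip over a high-order buddy coexist with the
unconditional pfn++ in the for statement.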
Liu Shixin March 27, 2021, 3:34 a.m. UTC | #2
Sorry for replying after such a long time, and thanks for your advice. Your proposed change does seem to make the code cleaner and more efficient.

I repeated move_freepages_block() 2,000,000 times in a VM and counted jiffies. The average was about 12,000 both before and after the change. I think that's probably because I'm using the sparse memory model, so pfn_to_page() is not time-consuming.
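
A number like that can be collected with a throwaway in-kernel loop, roughly
as below (a sketch only: the preparation of the zone and pageblock, and any
locking move_freepages_block() expects from its caller, are omitted; zone and
page stand for a pageblock set up beforehand):

	unsigned long t0 = jiffies;
	unsigned long i;

	for (i = 0; i < 2000000; i++)
		move_freepages_block(zone, page, MIGRATE_MOVABLE, NULL);
	pr_info("2000000 calls: %lu jiffies\n", jiffies - t0);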


On 2021/3/23 20:54, Matthew Wilcox wrote:
> On Tue, Mar 23, 2021 at 09:12:15PM +0800, Liu Shixin wrote:
>> From: Kefeng Wang <wangkefeng.wang@huawei.com>
>>
>> The start_pfn and end_pfn are already available in move_freepages_block(),
>> so there is no need to go back and forth between page and pfn in
>> move_freepages() and move_freepages_block(). Also, pfn_valid_within() should
>> validate the pfn before the page is touched.
> This looks good to me:
>
> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>
>>  static int move_freepages(struct zone *zone,
>> -			  struct page *start_page, struct page *end_page,
>> +			  unsigned long start_pfn, unsigned long end_pfn,
>>  			  int migratetype, int *num_movable)
>>  {
>>  	struct page *page;
>> +	unsigned long pfn;
>>  	unsigned int order;
>>  	int pages_moved = 0;
>>  
>> -	for (page = start_page; page <= end_page;) {
>> -		if (!pfn_valid_within(page_to_pfn(page))) {
>> -			page++;
>> +	for (pfn = start_pfn; pfn <= end_pfn;) {
>> +		if (!pfn_valid_within(pfn)) {
>> +			pfn++;
>>  			continue;
>>  		}
>>  
>> +		page = pfn_to_page(pfn);
> I wonder if this wouldn't be even better if we did:
>
> 	struct page *start_page = pfn_to_page(start_pfn);
>
> 	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
> 		struct page *page = start_page + pfn - start_pfn;
>
> 		if (!pfn_valid_within(pfn))
> 			continue;
>
>> -
>> -			page++;
>> +			pfn++;
>>  			continue;
> ... then we can drop the increment of pfn here
>
>>  		}
>>  
>> @@ -2458,7 +2459,7 @@ static int move_freepages(struct zone *zone,
>>  
>>  		order = buddy_order(page);
>>  		move_to_free_list(page, zone, order, migratetype);
>> -		page += 1 << order;
>> +		pfn += 1 << order;
> ... and change this to pfn += (1 << order) - 1;
>
> Do you have any numbers to quantify the benefit of this change?
> .
>
Vlastimil Babka March 29, 2021, 3:31 p.m. UTC | #3
On 3/23/21 2:12 PM, Liu Shixin wrote:
> From: Kefeng Wang <wangkefeng.wang@huawei.com>
> 
> The start_pfn and end_pfn are already available in move_freepages_block(),
> so there is no need to go back and forth between page and pfn in
> move_freepages() and move_freepages_block(). Also, pfn_valid_within() should
> validate the pfn before the page is touched.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Agreed with Matthew's suggestion, also:

> @@ -2468,25 +2469,22 @@ static int move_freepages(struct zone *zone,
>  int move_freepages_block(struct zone *zone, struct page *page,
>  				int migratetype, int *num_movable)
>  {
> -	unsigned long start_pfn, end_pfn;
> -	struct page *start_page, *end_page;
> +	unsigned long start_pfn, end_pfn, pfn;
>  
>  	if (num_movable)
>  		*num_movable = 0;
>  
> -	start_pfn = page_to_pfn(page);
> -	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
> -	start_page = pfn_to_page(start_pfn);
> -	end_page = start_page + pageblock_nr_pages - 1;
> +	pfn = page_to_pfn(page);
> +	start_pfn = pfn & ~(pageblock_nr_pages - 1);

Since you touch this already, consider pageblock_start_pfn()

>  	end_pfn = start_pfn + pageblock_nr_pages - 1;

I'd also drop the "- 1" here and instead adjust the for loop's ending condition
from "pfn <= end_pfn" to "pfn < end_pfn", as that's the more common way of doing it.

Thanks.

>  
>  	/* Do not cross zone boundaries */
>  	if (!zone_spans_pfn(zone, start_pfn))
> -		start_page = page;
> +		start_pfn = pfn;
>  	if (!zone_spans_pfn(zone, end_pfn))
>  		return 0;
>  
> -	return move_freepages(zone, start_page, end_page, migratetype,
> +	return move_freepages(zone, start_pfn, end_pfn, migratetype,
>  								num_movable);
>  }
>  
>
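
Folding in both of Vlastimil's points, the helper could end up roughly as
follows (a sketch; pageblock_start_pfn(pfn) is assumed to behave as
round_down(pfn, pageblock_nr_pages), and with a half-open range the zone-span
check has to test the last valid pfn, end_pfn - 1):

	int move_freepages_block(struct zone *zone, struct page *page,
					int migratetype, int *num_movable)
	{
		unsigned long start_pfn, end_pfn, pfn;

		if (num_movable)
			*num_movable = 0;

		pfn = page_to_pfn(page);
		start_pfn = pageblock_start_pfn(pfn);
		end_pfn = start_pfn + pageblock_nr_pages;	/* exclusive bound */

		/* Do not cross zone boundaries */
		if (!zone_spans_pfn(zone, start_pfn))
			start_pfn = pfn;
		if (!zone_spans_pfn(zone, end_pfn - 1))
			return 0;

		/* move_freepages() then iterates with pfn < end_pfn */
		return move_freepages(zone, start_pfn, end_pfn, migratetype,
							num_movable);
	}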

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c53fe4fa10bf..ccfaa8158862 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2425,19 +2425,21 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * boundary. If alignment is required, use move_freepages_block()
  */
 static int move_freepages(struct zone *zone,
-			  struct page *start_page, struct page *end_page,
+			  unsigned long start_pfn, unsigned long end_pfn,
 			  int migratetype, int *num_movable)
 {
 	struct page *page;
+	unsigned long pfn;
 	unsigned int order;
 	int pages_moved = 0;
 
-	for (page = start_page; page <= end_page;) {
-		if (!pfn_valid_within(page_to_pfn(page))) {
-			page++;
+	for (pfn = start_pfn; pfn <= end_pfn;) {
+		if (!pfn_valid_within(pfn)) {
+			pfn++;
 			continue;
 		}
 
+		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
 			/*
 			 * We assume that pages that could be isolated for
@@ -2447,8 +2449,7 @@ static int move_freepages(struct zone *zone,
 			if (num_movable &&
 					(PageLRU(page) || __PageMovable(page)))
 				(*num_movable)++;
-
-			page++;
+			pfn++;
 			continue;
 		}
 
@@ -2458,7 +2459,7 @@ static int move_freepages(struct zone *zone,
 
 		order = buddy_order(page);
 		move_to_free_list(page, zone, order, migratetype);
-		page += 1 << order;
+		pfn += 1 << order;
 		pages_moved += 1 << order;
 	}
 
@@ -2468,25 +2469,22 @@ static int move_freepages(struct zone *zone,
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype, int *num_movable)
 {
-	unsigned long start_pfn, end_pfn;
-	struct page *start_page, *end_page;
+	unsigned long start_pfn, end_pfn, pfn;
 
 	if (num_movable)
 		*num_movable = 0;
 
-	start_pfn = page_to_pfn(page);
-	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
-	start_page = pfn_to_page(start_pfn);
-	end_page = start_page + pageblock_nr_pages - 1;
+	pfn = page_to_pfn(page);
+	start_pfn = pfn & ~(pageblock_nr_pages - 1);
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
 	if (!zone_spans_pfn(zone, start_pfn))
-		start_page = page;
+		start_pfn = pfn;
 	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
-	return move_freepages(zone, start_page, end_page, migratetype,
+	return move_freepages(zone, start_pfn, end_pfn, migratetype,
 								num_movable);
 }