
mm: separate move/undo parts from migrate_pages_batch()

Message ID 20250115095343.46390-1-byungchul@sk.com (mailing list archive)
State New
Series mm: separate move/undo parts from migrate_pages_batch()

Commit Message

Byungchul Park Jan. 15, 2025, 9:53 a.m. UTC
Hi,

This is part of the luf (lazy unmap flush) patchset and has also been
referred to several times by e.g. Shivank Garg and Zi Yan, who are
currently working on page migration optimization.  Why don't we take
this first so that such migration optimization attempts can use it?

	Byungchul

--->8---
From a65a6e4975962707bf87171e317f005c6276887e Mon Sep 17 00:00:00 2001
From: Byungchul Park <byungchul@sk.com>
Date: Thu, 8 Aug 2024 15:53:58 +0900
Subject: [PATCH] mm: separate move/undo parts from migrate_pages_batch()

Functionally, no change.  This is a preparation for migration
optimizations that need to use separate folio lists for their own
handling during migration.  Refactor migrate_pages_batch() so as to
separate the move/undo parts from it.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 83 insertions(+), 51 deletions(-)
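
As context for how the split is intended to be used (this is not part
of the patch): a future migration optimization that keeps folios on its
own source/destination lists could drive the two new helpers roughly as
in the sketch below.  The helper names and signatures are the ones this
patch introduces; the surrounding caller is hypothetical and leaves out
the retry passes that migrate_pages_batch() performs.

	/* Hypothetical caller inside mm/migrate.c, for illustration only. */
	static void example_move_own_lists(struct list_head *my_src,
			struct list_head *my_dst,
			free_folio_t put_new_folio, unsigned long private,
			enum migrate_mode mode, int reason,
			struct list_head *ret_folios,
			struct migrate_pages_stats *stats)
	{
		int retry = 0, thp_retry = 0;
		int nr_failed = 0, nr_retry_pages = 0;

		/* Move the already-unmapped folios to their dst counterparts. */
		migrate_folios_move(my_src, my_dst, put_new_folio, private,
				mode, reason, ret_folios, stats,
				&retry, &thp_retry, &nr_failed,
				&nr_retry_pages);

		/* Undo whatever is still left on the lists (e.g. -EAGAIN). */
		migrate_folios_undo(my_src, my_dst, put_new_folio, private,
				ret_folios);
	}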

Comments

Shivank Garg Jan. 15, 2025, 10:07 a.m. UTC | #1
Hi Byungchul,

On 1/15/2025 3:23 PM, Byungchul Park wrote:
> Hi,
> 
> This is part of the luf (lazy unmap flush) patchset and has also been
> referred to several times by e.g. Shivank Garg and Zi Yan, who are
> currently working on page migration optimization.  Why don't we take
> this first so that such migration optimization attempts can use it?
> 
> 	Byungchul
> 
> --->8---
> From a65a6e4975962707bf87171e317f005c6276887e Mon Sep 17 00:00:00 2001
> From: Byungchul Park <byungchul@sk.com>
> Date: Thu, 8 Aug 2024 15:53:58 +0900
> Subject: [PATCH] mm: separate move/undo parts from migrate_pages_batch()
> 
> Functionally, no change.  This is a preparation for migration
> optimizations that need to use separate folio lists for their own
> handling during migration.  Refactor migrate_pages_batch() so as to
> separate the move/undo parts from it.
> 
> Signed-off-by: Byungchul Park <byungchul@sk.com>
> ---
>  mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
>  1 file changed, 83 insertions(+), 51 deletions(-)

[...]

> +		 */
> +		switch(rc) {
		     ^^^
minor nit here - space after 'switch' before the parentheses.
This code style issue was present in the original code and was carried
over during refactoring.
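That is, the kernel coding style expects:

		switch (rc) {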

With this fix, please add-
Reviewed-by: Shivank Garg <shivankg@amd.com>
 
> +		case -EAGAIN:
> +			*retry += 1;
> +			*thp_retry += is_thp;
> +			*nr_retry_pages += nr_pages;
> +			break;
> +		case MIGRATEPAGE_SUCCESS:
> +			stats->nr_succeeded += nr_pages;
> +			stats->nr_thp_succeeded += is_thp;
> +			break;
> +		default:
> +			*nr_failed += 1;
> +			stats->nr_thp_failed += is_thp;
> +			stats->nr_failed_pages += nr_pages;
> +			break;
> +		}
> +		dst = dst2;
> +		dst2 = list_next_entry(dst, lru);
> +	}
> +}
> +
> +static void migrate_folios_undo(struct list_head *src_folios,
> +		struct list_head *dst_folios,
> +		free_folio_t put_new_folio, unsigned long private,
> +		struct list_head *ret_folios)
> +{
> +	struct folio *folio, *folio2, *dst, *dst2;
> +
> +	dst = list_first_entry(dst_folios, struct folio, lru);
> +	dst2 = list_next_entry(dst, lru);
> +	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> +		int old_page_state = 0;
> +		struct anon_vma *anon_vma = NULL;
> +
> +		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> +		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> +				anon_vma, true, ret_folios);
> +		list_del(&dst->lru);
> +		migrate_folio_undo_dst(dst, true, put_new_folio, private);
> +		dst = dst2;
> +		dst2 = list_next_entry(dst, lru);
> +	}
> +}
> +
>  /*
>   * migrate_pages_batch() first unmaps folios in the from list as many as
>   * possible, then move the unmapped folios.
> @@ -1717,7 +1792,7 @@ static int migrate_pages_batch(struct list_head *from,
>  	int pass = 0;
>  	bool is_thp = false;
>  	bool is_large = false;
> -	struct folio *folio, *folio2, *dst = NULL, *dst2;
> +	struct folio *folio, *folio2, *dst = NULL;
>  	int rc, rc_saved = 0, nr_pages;
>  	LIST_HEAD(unmap_folios);
>  	LIST_HEAD(dst_folios);
> @@ -1888,42 +1963,11 @@ static int migrate_pages_batch(struct list_head *from,
>  		thp_retry = 0;
>  		nr_retry_pages = 0;
>  
> -		dst = list_first_entry(&dst_folios, struct folio, lru);
> -		dst2 = list_next_entry(dst, lru);
> -		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> -			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
> -			nr_pages = folio_nr_pages(folio);
> -
> -			cond_resched();
> -
> -			rc = migrate_folio_move(put_new_folio, private,
> -						folio, dst, mode,
> -						reason, ret_folios);
> -			/*
> -			 * The rules are:
> -			 *	Success: folio will be freed
> -			 *	-EAGAIN: stay on the unmap_folios list
> -			 *	Other errno: put on ret_folios list
> -			 */
> -			switch(rc) {
> -			case -EAGAIN:
> -				retry++;
> -				thp_retry += is_thp;
> -				nr_retry_pages += nr_pages;
> -				break;
> -			case MIGRATEPAGE_SUCCESS:
> -				stats->nr_succeeded += nr_pages;
> -				stats->nr_thp_succeeded += is_thp;
> -				break;
> -			default:
> -				nr_failed++;
> -				stats->nr_thp_failed += is_thp;
> -				stats->nr_failed_pages += nr_pages;
> -				break;
> -			}
> -			dst = dst2;
> -			dst2 = list_next_entry(dst, lru);
> -		}
> +		/* Move the unmapped folios */
> +		migrate_folios_move(&unmap_folios, &dst_folios,
> +				put_new_folio, private, mode, reason,
> +				ret_folios, stats, &retry, &thp_retry,
> +				&nr_failed, &nr_retry_pages);
>  	}
>  	nr_failed += retry;
>  	stats->nr_thp_failed += thp_retry;
> @@ -1932,20 +1976,8 @@ static int migrate_pages_batch(struct list_head *from,
>  	rc = rc_saved ? : nr_failed;
>  out:
>  	/* Cleanup remaining folios */
> -	dst = list_first_entry(&dst_folios, struct folio, lru);
> -	dst2 = list_next_entry(dst, lru);
> -	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> -		int old_page_state = 0;
> -		struct anon_vma *anon_vma = NULL;
> -
> -		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> -		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> -				       anon_vma, true, ret_folios);
> -		list_del(&dst->lru);
> -		migrate_folio_undo_dst(dst, true, put_new_folio, private);
> -		dst = dst2;
> -		dst2 = list_next_entry(dst, lru);
> -	}
> +	migrate_folios_undo(&unmap_folios, &dst_folios,
> +			put_new_folio, private, ret_folios);
>  
>  	return rc;
>  }
Byungchul Park Jan. 15, 2025, 10:23 a.m. UTC | #2
On Wed, Jan 15, 2025 at 03:37:15PM +0530, Shivank Garg wrote:
> Hi Byungchul,
> 
> On 1/15/2025 3:23 PM, Byungchul Park wrote:
> > Hi,
> > 
> > This is part of the luf (lazy unmap flush) patchset and has also been
> > referred to several times by e.g. Shivank Garg and Zi Yan, who are
> > currently working on page migration optimization.  Why don't we take
> > this first so that such migration optimization attempts can use it?
> > 
> > 	Byungchul
> > 
> > --->8---
> > From a65a6e4975962707bf87171e317f005c6276887e Mon Sep 17 00:00:00 2001
> > From: Byungchul Park <byungchul@sk.com>
> > Date: Thu, 8 Aug 2024 15:53:58 +0900
> > Subject: [PATCH] mm: separate move/undo parts from migrate_pages_batch()
> > 
> > Functionally, no change.  This is a preparation for migration
> > optimizations that need to use separate folio lists for their own
> > handling during migration.  Refactor migrate_pages_batch() so as to
> > separate the move/undo parts from it.
> > 
> > Signed-off-by: Byungchul Park <byungchul@sk.com>
> > ---
> >  mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
> >  1 file changed, 83 insertions(+), 51 deletions(-)
> 
> [...]
> 
> > +		 */
> > +		switch(rc) {
> 		     ^^^
> minor nit here - space after 'switch' before the parentheses.
> This code style issue was present in the original code and was carried
> over during refactoring.

I should've taken more care of it.  Thank you.

	Byungchul

> With this fix, please add-
> Reviewed-by: Shivank Garg <shivankg@amd.com>
>  
> > +		case -EAGAIN:
> > +			*retry += 1;
> > +			*thp_retry += is_thp;
> > +			*nr_retry_pages += nr_pages;
> > +			break;
> > +		case MIGRATEPAGE_SUCCESS:
> > +			stats->nr_succeeded += nr_pages;
> > +			stats->nr_thp_succeeded += is_thp;
> > +			break;
> > +		default:
> > +			*nr_failed += 1;
> > +			stats->nr_thp_failed += is_thp;
> > +			stats->nr_failed_pages += nr_pages;
> > +			break;
> > +		}
> > +		dst = dst2;
> > +		dst2 = list_next_entry(dst, lru);
> > +	}
> > +}
> > +
> > +static void migrate_folios_undo(struct list_head *src_folios,
> > +		struct list_head *dst_folios,
> > +		free_folio_t put_new_folio, unsigned long private,
> > +		struct list_head *ret_folios)
> > +{
> > +	struct folio *folio, *folio2, *dst, *dst2;
> > +
> > +	dst = list_first_entry(dst_folios, struct folio, lru);
> > +	dst2 = list_next_entry(dst, lru);
> > +	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> > +		int old_page_state = 0;
> > +		struct anon_vma *anon_vma = NULL;
> > +
> > +		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> > +		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> > +				anon_vma, true, ret_folios);
> > +		list_del(&dst->lru);
> > +		migrate_folio_undo_dst(dst, true, put_new_folio, private);
> > +		dst = dst2;
> > +		dst2 = list_next_entry(dst, lru);
> > +	}
> > +}
> > +
> >  /*
> >   * migrate_pages_batch() first unmaps folios in the from list as many as
> >   * possible, then move the unmapped folios.
> > @@ -1717,7 +1792,7 @@ static int migrate_pages_batch(struct list_head *from,
> >  	int pass = 0;
> >  	bool is_thp = false;
> >  	bool is_large = false;
> > -	struct folio *folio, *folio2, *dst = NULL, *dst2;
> > +	struct folio *folio, *folio2, *dst = NULL;
> >  	int rc, rc_saved = 0, nr_pages;
> >  	LIST_HEAD(unmap_folios);
> >  	LIST_HEAD(dst_folios);
> > @@ -1888,42 +1963,11 @@ static int migrate_pages_batch(struct list_head *from,
> >  		thp_retry = 0;
> >  		nr_retry_pages = 0;
> >  
> > -		dst = list_first_entry(&dst_folios, struct folio, lru);
> > -		dst2 = list_next_entry(dst, lru);
> > -		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> > -			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
> > -			nr_pages = folio_nr_pages(folio);
> > -
> > -			cond_resched();
> > -
> > -			rc = migrate_folio_move(put_new_folio, private,
> > -						folio, dst, mode,
> > -						reason, ret_folios);
> > -			/*
> > -			 * The rules are:
> > -			 *	Success: folio will be freed
> > -			 *	-EAGAIN: stay on the unmap_folios list
> > -			 *	Other errno: put on ret_folios list
> > -			 */
> > -			switch(rc) {
> > -			case -EAGAIN:
> > -				retry++;
> > -				thp_retry += is_thp;
> > -				nr_retry_pages += nr_pages;
> > -				break;
> > -			case MIGRATEPAGE_SUCCESS:
> > -				stats->nr_succeeded += nr_pages;
> > -				stats->nr_thp_succeeded += is_thp;
> > -				break;
> > -			default:
> > -				nr_failed++;
> > -				stats->nr_thp_failed += is_thp;
> > -				stats->nr_failed_pages += nr_pages;
> > -				break;
> > -			}
> > -			dst = dst2;
> > -			dst2 = list_next_entry(dst, lru);
> > -		}
> > +		/* Move the unmapped folios */
> > +		migrate_folios_move(&unmap_folios, &dst_folios,
> > +				put_new_folio, private, mode, reason,
> > +				ret_folios, stats, &retry, &thp_retry,
> > +				&nr_failed, &nr_retry_pages);
> >  	}
> >  	nr_failed += retry;
> >  	stats->nr_thp_failed += thp_retry;
> > @@ -1932,20 +1976,8 @@ static int migrate_pages_batch(struct list_head *from,
> >  	rc = rc_saved ? : nr_failed;
> >  out:
> >  	/* Cleanup remaining folios */
> > -	dst = list_first_entry(&dst_folios, struct folio, lru);
> > -	dst2 = list_next_entry(dst, lru);
> > -	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> > -		int old_page_state = 0;
> > -		struct anon_vma *anon_vma = NULL;
> > -
> > -		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> > -		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> > -				       anon_vma, true, ret_folios);
> > -		list_del(&dst->lru);
> > -		migrate_folio_undo_dst(dst, true, put_new_folio, private);
> > -		dst = dst2;
> > -		dst2 = list_next_entry(dst, lru);
> > -	}
> > +	migrate_folios_undo(&unmap_folios, &dst_folios,
> > +			put_new_folio, private, ret_folios);
> >  
> >  	return rc;
> >  }

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index dfb5eba3c5223..c8f25434b9973 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1695,6 +1695,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 	return nr_failed;
 }
 
+static void migrate_folios_move(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		enum migrate_mode mode, int reason,
+		struct list_head *ret_folios,
+		struct migrate_pages_stats *stats,
+		int *retry, int *thp_retry, int *nr_failed,
+		int *nr_retry_pages)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+	bool is_thp;
+	int nr_pages;
+	int rc;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+		nr_pages = folio_nr_pages(folio);
+
+		cond_resched();
+
+		rc = migrate_folio_move(put_new_folio, private,
+				folio, dst, mode,
+				reason, ret_folios);
+		/*
+		 * The rules are:
+		 *	Success: folio will be freed
+		 *	-EAGAIN: stay on the unmap_folios list
+		 *	Other errno: put on ret_folios list
+		 */
+		switch(rc) {
+		case -EAGAIN:
+			*retry += 1;
+			*thp_retry += is_thp;
+			*nr_retry_pages += nr_pages;
+			break;
+		case MIGRATEPAGE_SUCCESS:
+			stats->nr_succeeded += nr_pages;
+			stats->nr_thp_succeeded += is_thp;
+			break;
+		default:
+			*nr_failed += 1;
+			stats->nr_thp_failed += is_thp;
+			stats->nr_failed_pages += nr_pages;
+			break;
+		}
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		struct list_head *ret_folios)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		int old_page_state = 0;
+		struct anon_vma *anon_vma = NULL;
+
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				anon_vma, true, ret_folios);
+		list_del(&dst->lru);
+		migrate_folio_undo_dst(dst, true, put_new_folio, private);
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
@@ -1717,7 +1792,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int pass = 0;
 	bool is_thp = false;
 	bool is_large = false;
-	struct folio *folio, *folio2, *dst = NULL, *dst2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
@@ -1888,42 +1963,11 @@ static int migrate_pages_batch(struct list_head *from,
 		thp_retry = 0;
 		nr_retry_pages = 0;
 
-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-						folio, dst, mode,
-						reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1932,20 +1976,8 @@ static int migrate_pages_batch(struct list_head *from,
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int old_page_state = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
-				       anon_vma, true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);
 
 	return rc;
 }