[v3,5/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation

Message ID 20240827114728.3212578-6-wangkefeng.wang@huawei.com
State New
Series mm: memory_hotplug: improve do_migrate_range()

Commit Message

Kefeng Wang Aug. 27, 2024, 11:47 a.m. UTC
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio
isolation, which cleans up the code a bit and saves a few calls to
compound_head().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory_hotplug.c | 45 +++++++++++++++++----------------------------
 1 file changed, 17 insertions(+), 28 deletions(-)
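
For context, this patch builds on the isolate_folio_to_list() helper
introduced earlier in the series (patch 4/5). Below is a sketch of that
helper, reconstructed here for illustration only; the authoritative
version is the one added to mm/migrate.c by that patch:

bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	/* hugetlb has its own isolation path and manages its own reference */
	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	/* LRU folios vs. non-LRU movable folios (zsmalloc, balloon, ...) */
	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);
	if (!isolated)
		return false;

	/* note: list_add(), not list_add_tail() -- see the review below */
	list_add(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}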

Comments

Miaohe Lin Aug. 31, 2024, 9:01 a.m. UTC | #1
On 2024/8/27 19:47, Kefeng Wang wrote:
> Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio
> isolation, which cleans up the code a bit and saves a few calls to
> compound_head().
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/memory_hotplug.c | 45 +++++++++++++++++----------------------------
>  1 file changed, 17 insertions(+), 28 deletions(-)
> 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 1335fb6ef7fa..5f09866d17cf 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1772,15 +1772,15 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
>  
>  static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>  {
> +	struct folio *folio;
>  	unsigned long pfn;
> -	struct page *page;
>  	LIST_HEAD(source);
>  	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
>  				      DEFAULT_RATELIMIT_BURST);
>  
>  	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
> -		struct folio *folio;
> -		bool isolated;
> +		struct page *page;
> +		bool hugetlb;
>  
>  		if (!pfn_valid(pfn))
>  			continue;
> @@ -1811,34 +1811,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>  			continue;
>  		}
>  
> -		if (folio_test_hugetlb(folio)) {
> -			isolate_hugetlb(folio, &source);
> -			continue;
> +		hugetlb = folio_test_hugetlb(folio);
> +		if (!hugetlb) {
> +			folio = folio_get_nontail_page(page);
> +			if (!folio)
> +				continue;
>  		}
>  
> -		if (!get_page_unless_zero(page))
> -			continue;
> -		/*
> -		 * We can skip free pages. And we can deal with pages on
> -		 * LRU and non-lru movable pages.
> -		 */
> -		if (PageLRU(page))
> -			isolated = isolate_lru_page(page);
> -		else
> -			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
> -		if (isolated) {
> -			list_add_tail(&page->lru, &source);

This has the side effect that list_add_tail() is now replaced with
list_add(). But it seems this won't cause any problem.

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Thanks.
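
On the list_add_tail()/list_add() side effect noted above: the two
helpers in include/linux/list.h differ only in where the entry is
spliced, so the change only affects the order in which migrate_pages()
later walks &source, not correctness. For reference (simplified from
include/linux/list.h):

static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);	/* right after head: LIFO */
}

static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);	/* right before head: FIFO */
}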

Patch

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1335fb6ef7fa..5f09866d17cf 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1772,15 +1772,15 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 
 static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct folio *folio;
 	unsigned long pfn;
-	struct page *page;
 	LIST_HEAD(source);
 	static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		struct folio *folio;
-		bool isolated;
+		struct page *page;
+		bool hugetlb;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -1811,34 +1811,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 
-		if (folio_test_hugetlb(folio)) {
-			isolate_hugetlb(folio, &source);
-			continue;
+		hugetlb = folio_test_hugetlb(folio);
+		if (!hugetlb) {
+			folio = folio_get_nontail_page(page);
+			if (!folio)
+				continue;
 		}
 
-		if (!get_page_unless_zero(page))
-			continue;
-		/*
-		 * We can skip free pages. And we can deal with pages on
-		 * LRU and non-lru movable pages.
-		 */
-		if (PageLRU(page))
-			isolated = isolate_lru_page(page);
-		else
-			isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
-		if (isolated) {
-			list_add_tail(&page->lru, &source);
-			if (!__PageMovable(page))
-				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_lru(page));
-
-		} else {
+		if (!isolate_folio_to_list(folio, &source)) {
 			if (__ratelimit(&migrate_rs)) {
 				pr_warn("failed to isolate pfn %lx\n", pfn);
 				dump_page(page, "isolation failed");
 			}
 		}
-		put_page(page);
+
+		if (!hugetlb)
+			folio_put(folio);
 	}
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
@@ -1853,7 +1841,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * We have checked that migration range is on a single zone so
 		 * we can use the nid of the first page to all the others.
 		 */
-		mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+		mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
 
 		/*
 		 * try to allocate from a different node but reuse this node
@@ -1866,11 +1854,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		ret = migrate_pages(&source, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
 		if (ret) {
-			list_for_each_entry(page, &source, lru) {
+			list_for_each_entry(folio, &source, lru) {
 				if (__ratelimit(&migrate_rs)) {
 					pr_warn("migrating pfn %lx failed ret:%d\n",
-						page_to_pfn(page), ret);
-					dump_page(page, "migration failure");
+						folio_pfn(folio), ret);
+					dump_page(&folio->page,
+						  "migration failure");
 				}
 			}
 			putback_movable_pages(&source);
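
A note on context the hunks above elide: the migration-target setup at
the top of the !list_empty(&source) block. Roughly, reconstructed from
the pre-patch function (exact fields may vary by kernel version), the
allocation prefers any node with memory other than the one being
offlined and falls back to it only when it is the sole such node:

/* reconstructed context, not part of this diff */
nodemask_t nmask = node_states[N_MEMORY];
struct migration_target_control mtc = {
	.nmask = &nmask,
	.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};

/* runs after the mtc.nid assignment shown in the hunk above: prefer
 * other nodes, and reuse this node only if it is the only one */
node_clear(mtc.nid, nmask);
if (nodes_empty(nmask))
	node_set(mtc.nid, nmask);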