
[v3,8/8] mm/page_alloc: remove a wrapper for alloc_migration_target()

Message ID: 1592892828-1934-9-git-send-email-iamjoonsoo.kim@lge.com
State: New, archived
Series: clean-up the migration target allocation functions

Commit Message

Joonsoo Kim June 23, 2020, 6:13 a.m. UTC
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There is a well-defined standard migration target callback.
Use it directly.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/page_alloc.c     |  9 +++++++--
 mm/page_isolation.c | 11 -----------
 2 files changed, 7 insertions(+), 13 deletions(-)
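
For context, the callback this patch switches to looks roughly as follows (a
sketch only; the struct layout and signature are assumed from the earlier
patches in this series that introduce them):

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;	/* allowed destination nodes */
	gfp_t gfp_mask;		/* allocation flags for the target page */
};

/*
 * The standard callback: callers pack their allocation policy into a
 * migration_target_control and pass it through the opaque 'private'
 * argument of migrate_pages().
 */
struct page *alloc_migration_target(struct page *page, unsigned long private);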

Comments

Michal Hocko June 25, 2020, 12:10 p.m. UTC | #1
On Tue 23-06-20 15:13:48, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There is a well-defined standard migration target callback.
> Use it directly.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Michal Hocko <mhocko@suse.com>

Vlastimil Babka July 3, 2020, 4:18 p.m. UTC | #2
On 6/23/20 8:13 AM, js1304@gmail.com wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There is a well-defined standard migration target callback.
> Use it directly.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

But you could move this to patch 5/8 to reduce churn. And really, do the same
with mm/memory-failure.c's new_page() there, to drop the simple wrappers. Only
new_node_page() is complex enough to keep.
Hm wait, new_node_page() is only called by do_migrate_range(), which is only
called by __offline_pages() with an explicit check that all pages come from a
single zone, so the nmask could also be set up just once rather than per page,
making it possible to remove that wrapper too.
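
A rough sketch of that idea in do_migrate_range() (an illustration, not the
posted patch: it reuses the function's existing 'source' page list and takes
the gfp flags and node fallback logic from the current new_node_page()):

	nodemask_t nmask = node_states[N_MEMORY];
	struct migration_target_control mtc = {
		.nmask = &nmask,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};
	int ret;

	/* All pages come from a single zone, so the nid of the first
	 * page is valid for all of them. */
	mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));

	/* Prefer some other node, but fall back to the source node if
	 * it is the only one with memory. */
	node_clear(mtc.nid, nmask);
	if (nodes_empty(nmask))
		node_set(mtc.nid, nmask);

	ret = migrate_pages(&source, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG);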

But for new_page() you would have to define that mtc->nid == NUMA_NO_NODE means
alloc_migration_target() does page_to_nid(page) by itself.
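
A minimal sketch of that convention inside alloc_migration_target() (order-0
allocation only; the hugetlb and THP handling of the real function is elided):

struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct migration_target_control *mtc;
	int nid;

	mtc = (struct migration_target_control *)private;
	nid = mtc->nid;
	/* Suggested convention: NUMA_NO_NODE defers the node choice to
	 * the callback, which derives it from each source page. */
	if (nid == NUMA_NO_NODE)
		nid = page_to_nid(page);

	return __alloc_pages_nodemask(mtc->gfp_mask, 0, nid, mtc->nmask);
}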



Joonsoo Kim July 6, 2020, 8:44 a.m. UTC | #3
On Sat, Jul 4, 2020 at 1:18 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 6/23/20 8:13 AM, js1304@gmail.com wrote:
> > From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> >
> > There is a well-defined standard migration target callback.
> > Use it directly.
> >
> > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
>
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
>
> But you could move this to patch 5/8 to reduce churn. And really, do the same

Yes, I now realize that it would have been possible to make this change
earlier. However, reordering the patches would cause additional churn, so I
will not change the order in the next version. The result would be the same. :)

> with mm/memory-failure.c's new_page() there, to drop the simple wrappers. Only

Okay. As you suggested below, with the NUMA_NO_NODE handling we can remove
more of the wrappers. I will do it.

> new_node_page() is complex enough to keep.
> Hm wait, new_node_page() is only called by do_migrate_range(), which is only
> called by __offline_pages() with an explicit check that all pages come from a
> single zone, so the nmask could also be set up just once rather than per page,
> making it possible to remove that wrapper too.

I have tried this suggestion and found that the code is not simpler than
before. However, there would be a minor performance benefit, so I will include
this change, too.

> But for new_page() you would have to define that mtc->nid == NUMA_NO_NODE means
> alloc_migration_target() does page_to_nid(page) by itself.

Yes, I will use this suggestion.

Thanks.

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9808339..884dfb5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8359,6 +8359,11 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	unsigned long pfn = start;
 	unsigned int tries = 0;
 	int ret = 0;
+	struct migration_target_control mtc = {
+		.nid = zone_to_nid(cc->zone),
+		.nmask = &node_states[N_MEMORY],
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
 	migrate_prep();
 
@@ -8385,8 +8390,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 							&cc->migratepages);
 		cc->nr_migratepages -= nr_reclaimed;
 
-		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-				    NULL, 0, cc->mode, MR_CONTIG_RANGE);
+		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
+				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
 	}
 	if (ret < 0) {
 		putback_movable_pages(&cc->migratepages);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index adba031..242c031 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -306,14 +306,3 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 
 	return pfn < end_pfn ? -EBUSY : 0;
 }
-
-struct page *alloc_migrate_target(struct page *page, unsigned long private)
-{
-	struct migration_target_control mtc = {
-		.nid = page_to_nid(page),
-		.nmask = &node_states[N_MEMORY],
-		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
-	};
-
-	return alloc_migration_target(page, (unsigned long)&mtc);
-}