diff mbox series

[v4,06/11] mm/migrate: make a standard migration target allocation function

Message ID 1594107889-32228-7-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive)
State New, archived
Headers show
Series clean-up the migration target allocation functions | expand

Commit Message

Joonsoo Kim July 7, 2020, 7:44 a.m. UTC
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There are some similar functions for migration target allocation.  Since
there is no fundamental difference, it's better to keep just one rather
than keeping all variants.  This patch implements base migration target
allocation function.  In the following patches, variants will be converted
to use this function.

Changes should be mechanical but there are some differences. First, some
callers' nodemask is assigned NULL, since a NULL nodemask is treated as
all available nodes, that is, &node_states[N_MEMORY].  Second, for
hugetlb page allocation, gfp_mask is ORed in, since a user can now
provide a gfp_mask.  Third, if the provided node id is NUMA_NO_NODE, the
node id is set to the node where the migration source lives.

Note that the PageHighMem() call in the previous function is changed to
an open-coded "is_highmem_idx()" since it is more readable.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/migrate.h |  9 +++++----
 mm/internal.h           |  7 +++++++
 mm/memory-failure.c     |  7 +++++--
 mm/memory_hotplug.c     | 14 +++++++++-----
 mm/migrate.c            | 27 +++++++++++++++++----------
 mm/page_isolation.c     |  7 +++++--
 6 files changed, 48 insertions(+), 23 deletions(-)
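
For illustration only (not part of the patch): a minimal, hypothetical
caller of the new interface, derived from the hunks below. With the
defaults described in the commit message, NUMA_NO_NODE resolves to the
node of the source page and a NULL nodemask means all nodes in
node_states[N_MEMORY].

static struct page *example_new_page(struct page *page, unsigned long private)
{
	/* example_new_page() is a made-up name; the fields are from this patch */
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,	/* fall back to the source page's node */
		.nmask = NULL,		/* NULL means &node_states[N_MEMORY] */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	return alloc_migration_target(page, (unsigned long)&mtc);
}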

Comments

Michal Hocko July 7, 2020, 11:43 a.m. UTC | #1
On Tue 07-07-20 16:44:44, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There are some similar functions for migration target allocation.  Since
> there is no fundamental difference, it's better to keep just one rather
> than keeping all variants.  This patch implements base migration target
> allocation function.  In the following patches, variants will be converted
> to use this function.
> 
> Changes should be mechanical but there are some differences. First, some
> callers' nodemask is assigned to NULL since NULL nodemask will be
> considered as all available nodes, that is, &node_states[N_MEMORY].
> Second, for hugetlb page allocation, gfp_mask is ORed since a user could
> provide a gfp_mask from now on. Third, if provided node id is NUMA_NO_NODE,
> node id is set up to the node where migration source lives.
> 
> Note that PageHighmem() call in previous function is changed to open-code
> "is_highmem_idx()" since it provides more readability.
> 
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
>  include/linux/migrate.h |  9 +++++----
>  mm/internal.h           |  7 +++++++
>  mm/memory-failure.c     |  7 +++++--
>  mm/memory_hotplug.c     | 14 +++++++++-----
>  mm/migrate.c            | 27 +++++++++++++++++----------
>  mm/page_isolation.c     |  7 +++++--
>  6 files changed, 48 insertions(+), 23 deletions(-)
> 
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 1d70b4a..cc56f0d 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -10,6 +10,8 @@
>  typedef struct page *new_page_t(struct page *page, unsigned long private);
>  typedef void free_page_t(struct page *page, unsigned long private);
>  
> +struct migration_target_control;
> +
>  /*
>   * Return values from addresss_space_operations.migratepage():
>   * - negative errno on page migration failure;
> @@ -39,8 +41,7 @@ extern int migrate_page(struct address_space *mapping,
>  			enum migrate_mode mode);
>  extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
>  		unsigned long private, enum migrate_mode mode, int reason);
> -extern struct page *new_page_nodemask(struct page *page,
> -		int preferred_nid, nodemask_t *nodemask);
> +extern struct page *alloc_migration_target(struct page *page, unsigned long private);
>  extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
>  extern void putback_movable_page(struct page *page);
>  
> @@ -59,8 +60,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
>  		free_page_t free, unsigned long private, enum migrate_mode mode,
>  		int reason)
>  	{ return -ENOSYS; }
> -static inline struct page *new_page_nodemask(struct page *page,
> -		int preferred_nid, nodemask_t *nodemask)
> +static inline struct page *alloc_migration_target(struct page *page,
> +		unsigned long private)
>  	{ return NULL; }
>  static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	{ return -EBUSY; }
> diff --git a/mm/internal.h b/mm/internal.h
> index dd14c53..0beacf3 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -614,4 +614,11 @@ static inline bool is_migrate_highatomic_page(struct page *page)
>  
>  void setup_zone_pageset(struct zone *zone);
>  extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
> +
> +struct migration_target_control {
> +	int nid;		/* preferred node id */
> +	nodemask_t *nmask;
> +	gfp_t gfp_mask;
> +};
> +
>  #endif	/* __MM_INTERNAL_H */
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index c5e4cee..609d42b6 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1679,9 +1679,12 @@ EXPORT_SYMBOL(unpoison_memory);
>  
>  static struct page *new_page(struct page *p, unsigned long private)
>  {
> -	int nid = page_to_nid(p);
> +	struct migration_target_control mtc = {
> +		.nid = page_to_nid(p),
> +		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> +	};
>  
> -	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
> +	return alloc_migration_target(p, (unsigned long)&mtc);
>  }
>  
>  /*
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index cafe65eb..86bc2ad 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1267,19 +1267,23 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
>  
>  static struct page *new_node_page(struct page *page, unsigned long private)
>  {
> -	int nid = page_to_nid(page);
>  	nodemask_t nmask = node_states[N_MEMORY];
> +	struct migration_target_control mtc = {
> +		.nid = page_to_nid(page),
> +		.nmask = &nmask,
> +		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> +	};
>  
>  	/*
>  	 * try to allocate from a different node but reuse this node if there
>  	 * are no other online nodes to be used (e.g. we are offlining a part
>  	 * of the only existing node)
>  	 */
> -	node_clear(nid, nmask);
> -	if (nodes_empty(nmask))
> -		node_set(nid, nmask);
> +	node_clear(mtc.nid, *mtc.nmask);
> +	if (nodes_empty(*mtc.nmask))
> +		node_set(mtc.nid, *mtc.nmask);
>  
> -	return new_page_nodemask(page, nid, &nmask);
> +	return alloc_migration_target(page, (unsigned long)&mtc);
>  }
>  
>  static int
> diff --git a/mm/migrate.c b/mm/migrate.c
> index ecd7615..00cd81c 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1531,19 +1531,27 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
>  	return rc;
>  }
>  
> -struct page *new_page_nodemask(struct page *page,
> -				int preferred_nid, nodemask_t *nodemask)
> +struct page *alloc_migration_target(struct page *page, unsigned long private)
>  {
> -	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> +	struct migration_target_control *mtc;
> +	gfp_t gfp_mask;
>  	unsigned int order = 0;
>  	struct page *new_page = NULL;
> +	int nid;
> +	int zidx;
> +
> +	mtc = (struct migration_target_control *)private;
> +	gfp_mask = mtc->gfp_mask;
> +	nid = mtc->nid;
> +	if (nid == NUMA_NO_NODE)
> +		nid = page_to_nid(page);
>  
>  	if (PageHuge(page)) {
>  		struct hstate *h = page_hstate(compound_head(page));
>  
> -		gfp_mask = htlb_alloc_mask(h);
> -		return alloc_huge_page_nodemask(h, preferred_nid,
> -						nodemask, gfp_mask, false);
> +		gfp_mask |= htlb_alloc_mask(h);
> +		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
> +						gfp_mask, false);
>  	}
>  
>  	if (PageTransHuge(page)) {
> @@ -1555,12 +1563,11 @@ struct page *new_page_nodemask(struct page *page,
>  		gfp_mask |= GFP_TRANSHUGE;
>  		order = HPAGE_PMD_ORDER;
>  	}
> -
> -	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> +	zidx = zone_idx(page_zone(page));
> +	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
>  		gfp_mask |= __GFP_HIGHMEM;
>  
> -	new_page = __alloc_pages_nodemask(gfp_mask, order,
> -				preferred_nid, nodemask);
> +	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
>  
>  	if (new_page && PageTransHuge(new_page))
>  		prep_transhuge_page(new_page);
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index aec26d9..f25c66e 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -309,7 +309,10 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
>  
>  struct page *alloc_migrate_target(struct page *page, unsigned long private)
>  {
> -	int nid = page_to_nid(page);
> +	struct migration_target_control mtc = {
> +		.nid = page_to_nid(page),
> +		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> +	};
>  
> -	return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
> +	return alloc_migration_target(page, (unsigned long)&mtc);
>  }
> -- 
> 2.7.4
Vlastimil Babka July 7, 2020, 2:49 p.m. UTC | #2
On 7/7/20 9:44 AM, js1304@gmail.com wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There are some similar functions for migration target allocation.  Since
> there is no fundamental difference, it's better to keep just one rather
> than keeping all variants.  This patch implements base migration target
> allocation function.  In the following patches, variants will be converted
> to use this function.
> 
> Changes should be mechanical but there are some differences. First, some
> callers' nodemask is assigned to NULL since NULL nodemask will be
> considered as all available nodes, that is, &node_states[N_MEMORY].
> Second, for hugetlb page allocation, gfp_mask is ORed since a user could
> provide a gfp_mask from now on.

I think that's wrong. See how htlb_alloc_mask() determines between
GFP_HIGHUSER_MOVABLE and GFP_HIGHUSER, but then you OR it with __GFP_MOVABLE so
it's always GFP_HIGHUSER_MOVABLE.

Yeah, gfp_mask for hugetlb became exposed in new_page_nodemask() after the
v4 3/11 patch, but that doesn't mean we can start modifying it :/
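
A minimal sketch of the issue described above (illustrative, not from the
patch): the caller-supplied mask already carries __GFP_MOVABLE, so ORing
in htlb_alloc_mask() can never produce the !MOVABLE variant.

	/* caller-provided mask, e.g. from new_page() in mm/memory-failure.c */
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

	/* htlb_alloc_mask(h) returns GFP_HIGHUSER or GFP_HIGHUSER_MOVABLE */
	gfp_mask |= htlb_alloc_mask(h);

	/*
	 * GFP_HIGHUSER_MOVABLE is (GFP_HIGHUSER | __GFP_MOVABLE), so the
	 * result always includes GFP_HIGHUSER_MOVABLE even when
	 * htlb_alloc_mask() deliberately returned the !MOVABLE mask.
	 */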
Michal Hocko July 7, 2020, 7 p.m. UTC | #3
On Tue 07-07-20 16:49:51, Vlastimil Babka wrote:
> On 7/7/20 9:44 AM, js1304@gmail.com wrote:
> > From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> > 
> > There are some similar functions for migration target allocation.  Since
> > there is no fundamental difference, it's better to keep just one rather
> > than keeping all variants.  This patch implements base migration target
> > allocation function.  In the following patches, variants will be converted
> > to use this function.
> > 
> > Changes should be mechanical but there are some differences. First, some
> > callers' nodemask is assigned to NULL since NULL nodemask will be
> > considered as all available nodes, that is, &node_states[N_MEMORY].
> > Second, for hugetlb page allocation, gfp_mask is ORed since a user could
> > provide a gfp_mask from now on.
> 
> I think that's wrong. See how htlb_alloc_mask() determines between
> GFP_HIGHUSER_MOVABLE and GFP_HIGHUSER, but then you OR it with __GFP_MOVABLE so
> it's always GFP_HIGHUSER_MOVABLE.

Right you are! Not that it would make any real difference because only
migratable hugetlb pages will get __GFP_MOVABLE and so we shouldn't
really end up here for !movable pages in the first place (not sure about
soft offlining at this moment). But yeah, it would simply be better to
override the gfp mask for hugetlb, which we have been doing anyway.
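
For reference, the override mentioned here is the pre-patch behaviour,
visible in the lines removed from the mm/migrate.c hunk; as Joonsoo notes
below, a plain override also discards caller flags such as __GFP_THISNODE.

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		/* pre-patch: the caller's gfp_mask is replaced outright */
		gfp_mask = htlb_alloc_mask(h);
		return alloc_huge_page_nodemask(h, preferred_nid,
						nodemask, gfp_mask, false);
	}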
Joonsoo Kim July 9, 2020, 7:15 a.m. UTC | #4
On Wed, Jul 8, 2020 at 4:00 AM Michal Hocko <mhocko@kernel.org> wrote:
>
> On Tue 07-07-20 16:49:51, Vlastimil Babka wrote:
> > On 7/7/20 9:44 AM, js1304@gmail.com wrote:
> > > From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> > >
> > > There are some similar functions for migration target allocation.  Since
> > > there is no fundamental difference, it's better to keep just one rather
> > > than keeping all variants.  This patch implements base migration target
> > > allocation function.  In the following patches, variants will be converted
> > > to use this function.
> > >
> > > Changes should be mechanical but there are some differences. First, some
> > > callers' nodemask is assigned to NULL since NULL nodemask will be
> > > considered as all available nodes, that is, &node_states[N_MEMORY].
> > > Second, for hugetlb page allocation, gfp_mask is ORed since a user could
> > > provide a gfp_mask from now on.
> >
> > I think that's wrong. See how htlb_alloc_mask() determines between
> > GFP_HIGHUSER_MOVABLE and GFP_HIGHUSER, but then you OR it with __GFP_MOVABLE so
> > it's always GFP_HIGHUSER_MOVABLE.

Indeed.

> Right you are! Not that it would make any real difference because only
> migratable hugetlb pages will get __GFP_MOVABLE and so we shouldn't
> really end up here for !movable pages in the first place (not sure about
> soft offlining at this moment). But yeah it would be simply better to
> override gfp mask for hugetlb which we have been doing anyway.

Overriding the gfp mask doesn't work since some users will call this function
with __GFP_THISNODE.  I will use hugepage_movable_supported() here and
clear __GFP_MOVABLE if needed.

Thanks.
Michal Hocko July 9, 2020, 10:28 a.m. UTC | #5
On Thu 09-07-20 16:15:07, Joonsoo Kim wrote:
> On Wed, Jul 8, 2020 at 4:00 AM Michal Hocko <mhocko@kernel.org> wrote:
> >
> > On Tue 07-07-20 16:49:51, Vlastimil Babka wrote:
> > > On 7/7/20 9:44 AM, js1304@gmail.com wrote:
> > > > From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> > > >
> > > > There are some similar functions for migration target allocation.  Since
> > > > there is no fundamental difference, it's better to keep just one rather
> > > > than keeping all variants.  This patch implements base migration target
> > > > allocation function.  In the following patches, variants will be converted
> > > > to use this function.
> > > >
> > > > Changes should be mechanical but there are some differences. First, some
> > > > callers' nodemask is assigned to NULL since NULL nodemask will be
> > > > considered as all available nodes, that is, &node_states[N_MEMORY].
> > > > Second, for hugetlb page allocation, gfp_mask is ORed since a user could
> > > > provide a gfp_mask from now on.
> > >
> > > I think that's wrong. See how htlb_alloc_mask() determines between
> > > GFP_HIGHUSER_MOVABLE and GFP_HIGHUSER, but then you OR it with __GFP_MOVABLE so
> > > it's always GFP_HIGHUSER_MOVABLE.
> 
> Indeed.
> 
> > Right you are! Not that it would make any real difference because only
> > migratable hugetlb pages will get __GFP_MOVABLE and so we shouldn't
> > really end up here for !movable pages in the first place (not sure about
> > soft offlining at this moment). But yeah it would be simply better to
> > override gfp mask for hugetlb which we have been doing anyway.
> 
> Overriding the gfp mask doesn't work since some users will call this function
> with __GFP_THISNODE.

> I will use hugepage_movable_supported() here and
> clear __GFP_MOVABLE if needed.

hugepage_movable_supported() is really an implementation detail; do not
use it here. I think it would be better to add:

gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t mask)
{
	gfp_t default_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	return default_mask | (mask & __GFP_THISNODE);
}

If we need to special-case others, e.g. reclaim restrictions, there would
be a single place to do so.
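
A sketch of how the hugetlb branch of alloc_migration_target() might pick
this up, assuming a helper like htlb_modify_alloc_mask() above is added
(neither exists in the tree at this point):

	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		/* take hugetlb's defaults, keep only __GFP_THISNODE from the caller */
		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
						gfp_mask, false);
	}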
diff mbox series

Patch

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 1d70b4a..cc56f0d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@ 
 typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
+struct migration_target_control;
+
 /*
  * Return values from addresss_space_operations.migratepage():
  * - negative errno on page migration failure;
@@ -39,8 +41,7 @@  extern int migrate_page(struct address_space *mapping,
 			enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
-extern struct page *new_page_nodemask(struct page *page,
-		int preferred_nid, nodemask_t *nodemask);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
@@ -59,8 +60,8 @@  static inline int migrate_pages(struct list_head *l, new_page_t new,
 		free_page_t free, unsigned long private, enum migrate_mode mode,
 		int reason)
 	{ return -ENOSYS; }
-static inline struct page *new_page_nodemask(struct page *page,
-		int preferred_nid, nodemask_t *nodemask)
+static inline struct page *alloc_migration_target(struct page *page,
+		unsigned long private)
 	{ return NULL; }
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	{ return -EBUSY; }
diff --git a/mm/internal.h b/mm/internal.h
index dd14c53..0beacf3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -614,4 +614,11 @@  static inline bool is_migrate_highatomic_page(struct page *page)
 
 void setup_zone_pageset(struct zone *zone);
 extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+struct migration_target_control {
+	int nid;		/* preferred node id */
+	nodemask_t *nmask;
+	gfp_t gfp_mask;
+};
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c5e4cee..609d42b6 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1679,9 +1679,12 @@  EXPORT_SYMBOL(unpoison_memory);
 
 static struct page *new_page(struct page *p, unsigned long private)
 {
-	int nid = page_to_nid(p);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(p),
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(p, (unsigned long)&mtc);
 }
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index cafe65eb..86bc2ad 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1267,19 +1267,23 @@  static int scan_movable_pages(unsigned long start, unsigned long end,
 
 static struct page *new_node_page(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.nmask = &nmask,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
 	/*
 	 * try to allocate from a different node but reuse this node if there
 	 * are no other online nodes to be used (e.g. we are offlining a part
 	 * of the only existing node)
 	 */
-	node_clear(nid, nmask);
-	if (nodes_empty(nmask))
-		node_set(nid, nmask);
+	node_clear(mtc.nid, *mtc.nmask);
+	if (nodes_empty(*mtc.nmask))
+		node_set(mtc.nid, *mtc.nmask);
 
-	return new_page_nodemask(page, nid, &nmask);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }
 
 static int
diff --git a/mm/migrate.c b/mm/migrate.c
index ecd7615..00cd81c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1531,19 +1531,27 @@  int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	return rc;
 }
 
-struct page *new_page_nodemask(struct page *page,
-				int preferred_nid, nodemask_t *nodemask)
+struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+	struct migration_target_control *mtc;
+	gfp_t gfp_mask;
 	unsigned int order = 0;
 	struct page *new_page = NULL;
+	int nid;
+	int zidx;
+
+	mtc = (struct migration_target_control *)private;
+	gfp_mask = mtc->gfp_mask;
+	nid = mtc->nid;
+	if (nid == NUMA_NO_NODE)
+		nid = page_to_nid(page);
 
 	if (PageHuge(page)) {
 		struct hstate *h = page_hstate(compound_head(page));
 
-		gfp_mask = htlb_alloc_mask(h);
-		return alloc_huge_page_nodemask(h, preferred_nid,
-						nodemask, gfp_mask, false);
+		gfp_mask |= htlb_alloc_mask(h);
+		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
+						gfp_mask, false);
 	}
 
 	if (PageTransHuge(page)) {
@@ -1555,12 +1563,11 @@  struct page *new_page_nodemask(struct page *page,
 		gfp_mask |= GFP_TRANSHUGE;
 		order = HPAGE_PMD_ORDER;
 	}
-
-	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+	zidx = zone_idx(page_zone(page));
+	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
+	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index aec26d9..f25c66e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -309,7 +309,10 @@  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 
 struct page *alloc_migrate_target(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }