
[v3,5/8] mm/migrate: make a standard migration target allocation function

Message ID 1592892828-1934-6-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive)
State New, archived
Series: clean-up the migration target allocation functions

Commit Message

Joonsoo Kim June 23, 2020, 6:13 a.m. UTC
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There are some similar functions for migration target allocation. Since
there is no fundamental difference, it's better to keep just one rather
than keeping all the variants. This patch implements a base migration
target allocation function. In the following patches, the variants will
be converted to use this function.

Note that the PageHighMem() call in the previous function is changed to
an open-coded "is_highmem_idx()" check, since it is more readable.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 include/linux/migrate.h |  5 +++--
 mm/internal.h           |  7 +++++++
 mm/memory-failure.c     |  8 ++++++--
 mm/memory_hotplug.c     | 14 +++++++++-----
 mm/migrate.c            | 21 +++++++++++++--------
 mm/page_isolation.c     |  8 ++++++--
 6 files changed, 44 insertions(+), 19 deletions(-)

Comments

Michal Hocko June 25, 2020, 12:05 p.m. UTC | #1
On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There are some similar functions for migration target allocation. Since
> there is no fundamental difference, it's better to keep just one rather
> than keeping all the variants. This patch implements a base migration
> target allocation function. In the following patches, the variants will
> be converted to use this function.
> 
> Note that the PageHighMem() call in the previous function is changed to
> an open-coded "is_highmem_idx()" check, since it is more readable.

I was a little surprised that alloc_migration_target still uses the
private argument while it only accepts a migration_target_control
structure pointer, but then I noticed that you are using it as a real
callback in a later patch.
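
For illustration, a sketch of the callback shape being discussed:
alloc_migration_target() matches the new_page_t signature, so a caller
can pack the allocation parameters into a migration_target_control on
the stack and pass it through migrate_pages()'s opaque 'private'
argument. The call site below is hypothetical (pagelist/nid are
placeholders); only the struct and function names come from this patch:

	static int example_migrate(struct list_head *pagelist, int nid)
	{
		struct migration_target_control mtc = {
			.nid = nid,
			.nmask = NULL,	/* NULL: all available nodes */
			.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
		};

		/* alloc_migration_target() is used directly as the
		 * new_page_t callback; &mtc travels through 'private'. */
		return migrate_pages(pagelist, alloc_migration_target, NULL,
				     (unsigned long)&mtc, MIGRATE_SYNC,
				     MR_MEMORY_HOTPLUG);
	}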

> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

A few questions inline
[...]

> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 47b8ccb..820ea5e 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1648,9 +1648,13 @@ EXPORT_SYMBOL(unpoison_memory);
>  
>  static struct page *new_page(struct page *p, unsigned long private)
>  {
> -	int nid = page_to_nid(p);
> +	struct migration_target_control mtc = {
> +		.nid = page_to_nid(p),
> +		.nmask = &node_states[N_MEMORY],

This could be .nmask = NULL, right? alloc_migration_target doesn't
modify the node mask, and a NULL nodemask is always interpreted as all
available nodes.
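
That is, the initializer could shrink to this shape (a sketch):

	struct migration_target_control mtc = {
		.nid = page_to_nid(p),
		/* .nmask left NULL: treated as all available nodes */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};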

> +		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> +	};
>  
> -	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
> +	return alloc_migration_target(p, (unsigned long)&mtc);
>  }
>  
[...]
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 634f1ea..3afff59 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1536,29 +1536,34 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
>  	return rc;
>  }
>  
> -struct page *new_page_nodemask(struct page *page,
> -				int preferred_nid, nodemask_t *nodemask)
> +struct page *alloc_migration_target(struct page *page, unsigned long private)
>  {
> -	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> +	struct migration_target_control *mtc;
> +	gfp_t gfp_mask;
>  	unsigned int order = 0;
>  	struct page *new_page = NULL;
> +	int zidx;
> +
> +	mtc = (struct migration_target_control *)private;
> +	gfp_mask = mtc->gfp_mask;
>  
>  	if (PageHuge(page)) {
>  		return alloc_huge_page_nodemask(
> -				page_hstate(compound_head(page)),
> -				preferred_nid, nodemask, 0, false);
> +				page_hstate(compound_head(page)), mtc->nid,
> +				mtc->nmask, gfp_mask, false);
>  	}
>  
>  	if (PageTransHuge(page)) {
> +		gfp_mask &= ~__GFP_RECLAIM;

What's up with this gfp_mask modification?

>  		gfp_mask |= GFP_TRANSHUGE;
>  		order = HPAGE_PMD_ORDER;
>  	}
> -
> -	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> +	zidx = zone_idx(page_zone(page));
> +	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
>  		gfp_mask |= __GFP_HIGHMEM;
>  
>  	new_page = __alloc_pages_nodemask(gfp_mask, order,
> -				preferred_nid, nodemask);
> +				mtc->nid, mtc->nmask);
>  
>  	if (new_page && PageTransHuge(new_page))
>  		prep_transhuge_page(new_page);
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index aec26d9..adba031 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -309,7 +309,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
>  
>  struct page *alloc_migrate_target(struct page *page, unsigned long private)
>  {
> -	int nid = page_to_nid(page);
> +	struct migration_target_control mtc = {
> +		.nid = page_to_nid(page),
> +		.nmask = &node_states[N_MEMORY],

nmask = NULL again

> +		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> +	};
>  
> -	return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
> +	return alloc_migration_target(page, (unsigned long)&mtc);
>  }
> -- 
> 2.7.4
Joonsoo Kim June 26, 2020, 5:02 a.m. UTC | #2
On Thu, Jun 25, 2020 at 9:05 PM Michal Hocko <mhocko@kernel.org> wrote:
>
> On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
> > From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> >
> > There are some similar functions for migration target allocation. Since
> > there is no fundamental difference, it's better to keep just one rather
> > than keeping all the variants. This patch implements a base migration
> > target allocation function. In the following patches, the variants will
> > be converted to use this function.
> >
> > Note that the PageHighMem() call in the previous function is changed to
> > an open-coded "is_highmem_idx()" check, since it is more readable.
>
> I was a little surprised that alloc_migration_target still uses the
> private argument while it only accepts a migration_target_control
> structure pointer, but then I noticed that you are using it as a real
> callback in a later patch.
>
> > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
>
> A few questions inline
> [...]
>
> > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> > index 47b8ccb..820ea5e 100644
> > --- a/mm/memory-failure.c
> > +++ b/mm/memory-failure.c
> > @@ -1648,9 +1648,13 @@ EXPORT_SYMBOL(unpoison_memory);
> >
> >  static struct page *new_page(struct page *p, unsigned long private)
> >  {
> > -     int nid = page_to_nid(p);
> > +     struct migration_target_control mtc = {
> > +             .nid = page_to_nid(p),
> > +             .nmask = &node_states[N_MEMORY],
>
> This could be .nmask = NULL, right? alloc_migration_target doesn't
> modify the node mask, and a NULL nodemask is always interpreted as all
> available nodes.

Will do.

> > +             .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
> > +     };
> >
> > -     return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
> > +     return alloc_migration_target(p, (unsigned long)&mtc);
> >  }
> >
> [...]
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index 634f1ea..3afff59 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -1536,29 +1536,34 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
> >       return rc;
> >  }
> >
> > -struct page *new_page_nodemask(struct page *page,
> > -                             int preferred_nid, nodemask_t *nodemask)
> > +struct page *alloc_migration_target(struct page *page, unsigned long private)
> >  {
> > -     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> > +     struct migration_target_control *mtc;
> > +     gfp_t gfp_mask;
> >       unsigned int order = 0;
> >       struct page *new_page = NULL;
> > +     int zidx;
> > +
> > +     mtc = (struct migration_target_control *)private;
> > +     gfp_mask = mtc->gfp_mask;
> >
> >       if (PageHuge(page)) {
> >               return alloc_huge_page_nodemask(
> > -                             page_hstate(compound_head(page)),
> > -                             preferred_nid, nodemask, 0, false);
> > +                             page_hstate(compound_head(page)), mtc->nid,
> > +                             mtc->nmask, gfp_mask, false);
> >       }
> >
> >       if (PageTransHuge(page)) {
> > +             gfp_mask &= ~__GFP_RECLAIM;
>
> What's up with this gfp_mask modification?

THP page allocation uses the standard gfp masks GFP_TRANSHUGE_LIGHT and
GFP_TRANSHUGE. The __GFP_RECLAIM flags are a big part of this standard
mask design, so I clear them here so as not to disrupt the THP gfp mask.
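
For reference, the THP mask definitions this refers to (as defined in
include/linux/gfp.h around the time of this series) show why simply
OR-ing GFP_TRANSHUGE in would not be enough:

	#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
					  __GFP_NOMEMALLOC | __GFP_NOWARN) & \
					 ~__GFP_RECLAIM)
	#define GFP_TRANSHUGE		(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)

Both standard THP masks clear __GFP_RECLAIM internally, so an incoming
mask derived from GFP_USER (which contains __GFP_RECLAIM) would leak
reclaim bits back in unless they are cleared first.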

> >               gfp_mask |= GFP_TRANSHUGE;
> >               order = HPAGE_PMD_ORDER;
> >       }
> > -
> > -     if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> > +     zidx = zone_idx(page_zone(page));
> > +     if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
> >               gfp_mask |= __GFP_HIGHMEM;
> >
> >       new_page = __alloc_pages_nodemask(gfp_mask, order,
> > -                             preferred_nid, nodemask);
> > +                             mtc->nid, mtc->nmask);
> >
> >       if (new_page && PageTransHuge(new_page))
> >               prep_transhuge_page(new_page);
> > diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> > index aec26d9..adba031 100644
> > --- a/mm/page_isolation.c
> > +++ b/mm/page_isolation.c
> > @@ -309,7 +309,11 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
> >
> >  struct page *alloc_migrate_target(struct page *page, unsigned long private)
> >  {
> > -     int nid = page_to_nid(page);
> > +     struct migration_target_control mtc = {
> > +             .nid = page_to_nid(page),
> > +             .nmask = &node_states[N_MEMORY],
>
> nmask = NULL again

Okay.

Thanks.
Michal Hocko June 26, 2020, 7:33 a.m. UTC | #3
On Fri 26-06-20 14:02:49, Joonsoo Kim wrote:
> On Thu, Jun 25, 2020 at 9:05 PM Michal Hocko <mhocko@kernel.org> wrote:
> >
> > On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
[...]
> > > -struct page *new_page_nodemask(struct page *page,
> > > -                             int preferred_nid, nodemask_t *nodemask)
> > > +struct page *alloc_migration_target(struct page *page, unsigned long private)
> > >  {
> > > -     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> > > +     struct migration_target_control *mtc;
> > > +     gfp_t gfp_mask;
> > >       unsigned int order = 0;
> > >       struct page *new_page = NULL;
> > > +     int zidx;
> > > +
> > > +     mtc = (struct migration_target_control *)private;
> > > +     gfp_mask = mtc->gfp_mask;
> > >
> > >       if (PageHuge(page)) {
> > >               return alloc_huge_page_nodemask(
> > > -                             page_hstate(compound_head(page)),
> > > -                             preferred_nid, nodemask, 0, false);
> > > +                             page_hstate(compound_head(page)), mtc->nid,
> > > +                             mtc->nmask, gfp_mask, false);
> > >       }
> > >
> > >       if (PageTransHuge(page)) {
> > > +             gfp_mask &= ~__GFP_RECLAIM;
> >
> > What's up with this gfp_mask modification?
> 
> > THP page allocation uses the standard gfp masks GFP_TRANSHUGE_LIGHT and
> > GFP_TRANSHUGE. The __GFP_RECLAIM flags are a big part of this standard
> > mask design, so I clear them here so as not to disrupt the THP gfp mask.

Why wasn't this needed before? I guess I must be missing something
here. This patch should be a mostly mechanical convergence of the
existing migration callbacks, but this change adds new behavior AFAICS.
It would effectively drop __GFP_RETRY_MAYFAIL and __GFP_KSWAPD_RECLAIM
from the mask, so the allocation would be "lighter". If that is your
intention then this should be a separate patch with an explanation
rather than hiding it in this patch.
Joonsoo Kim June 29, 2020, 6:41 a.m. UTC | #4
On Fri, Jun 26, 2020 at 4:33 PM Michal Hocko <mhocko@kernel.org> wrote:
>
> On Fri 26-06-20 14:02:49, Joonsoo Kim wrote:
> On Thu, Jun 25, 2020 at 9:05 PM Michal Hocko <mhocko@kernel.org> wrote:
> > >
> > > On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
> [...]
> > > > -struct page *new_page_nodemask(struct page *page,
> > > > -                             int preferred_nid, nodemask_t *nodemask)
> > > > +struct page *alloc_migration_target(struct page *page, unsigned long private)
> > > >  {
> > > > -     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> > > > +     struct migration_target_control *mtc;
> > > > +     gfp_t gfp_mask;
> > > >       unsigned int order = 0;
> > > >       struct page *new_page = NULL;
> > > > +     int zidx;
> > > > +
> > > > +     mtc = (struct migration_target_control *)private;
> > > > +     gfp_mask = mtc->gfp_mask;
> > > >
> > > >       if (PageHuge(page)) {
> > > >               return alloc_huge_page_nodemask(
> > > > -                             page_hstate(compound_head(page)),
> > > > -                             preferred_nid, nodemask, 0, false);
> > > > +                             page_hstate(compound_head(page)), mtc->nid,
> > > > +                             mtc->nmask, gfp_mask, false);
> > > >       }
> > > >
> > > >       if (PageTransHuge(page)) {
> > > > +             gfp_mask &= ~__GFP_RECLAIM;
> > >
> > > What's up with this gfp_mask modification?
> >
> > THP page allocation uses the standard gfp masks GFP_TRANSHUGE_LIGHT and
> > GFP_TRANSHUGE. The __GFP_RECLAIM flags are a big part of this standard
> > mask design, so I clear them here so as not to disrupt the THP gfp mask.
>
> Why wasn't this needed before? I guess I must be missing something
> here. This patch should be a mostly mechanical convergence of the
> existing migration callbacks, but this change adds new behavior AFAICS.

Before this patch, a user cannot specify a gfp_mask, and THP allocation
statically uses GFP_TRANSHUGE.
After this patch, a user can specify a gfp_mask and it could conflict
with GFP_TRANSHUGE. This code tries to avoid that conflict.

> It would effectively drop __GFP_RETRY_MAYFAIL and __GFP_KSWAPD_RECLAIM

__GFP_RETRY_MAYFAIL isn't dropped. __GFP_RECLAIM is
"___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM", so only
__GFP_KSWAPD_RECLAIM would be dropped for THP allocation.
IIUC, THP allocation doesn't use __GFP_KSWAPD_RECLAIM since its
overhead is too large and should be borne by the caller rather than by
a system thread (kswapd).
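
A worked example of the resulting flag arithmetic (a sketch, using the
flag definitions from include/linux/gfp.h of this era):

	gfp_t mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

	mask &= ~__GFP_RECLAIM;	/* clears ___GFP_DIRECT_RECLAIM and
				 * ___GFP_KSWAPD_RECLAIM, both inherited
				 * from GFP_USER */
	mask |= GFP_TRANSHUGE;	/* re-adds __GFP_DIRECT_RECLAIM only */

	/* Net effect:
	 * __GFP_RETRY_MAYFAIL  - kept (not part of __GFP_RECLAIM)
	 * __GFP_DIRECT_RECLAIM - kept (restored by GFP_TRANSHUGE)
	 * __GFP_KSWAPD_RECLAIM - dropped, matching the THP convention
	 */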

Thanks.
Michal Hocko June 29, 2020, 8:03 a.m. UTC | #5
On Mon 29-06-20 15:41:37, Joonsoo Kim wrote:
> On Fri, Jun 26, 2020 at 4:33 PM Michal Hocko <mhocko@kernel.org> wrote:
> >
> > On Fri 26-06-20 14:02:49, Joonsoo Kim wrote:
> > > On Thu, Jun 25, 2020 at 9:05 PM Michal Hocko <mhocko@kernel.org> wrote:
> > > >
> > > > On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
> > [...]
> > > > > -struct page *new_page_nodemask(struct page *page,
> > > > > -                             int preferred_nid, nodemask_t *nodemask)
> > > > > +struct page *alloc_migration_target(struct page *page, unsigned long private)
> > > > >  {
> > > > > -     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> > > > > +     struct migration_target_control *mtc;
> > > > > +     gfp_t gfp_mask;
> > > > >       unsigned int order = 0;
> > > > >       struct page *new_page = NULL;
> > > > > +     int zidx;
> > > > > +
> > > > > +     mtc = (struct migration_target_control *)private;
> > > > > +     gfp_mask = mtc->gfp_mask;
> > > > >
> > > > >       if (PageHuge(page)) {
> > > > >               return alloc_huge_page_nodemask(
> > > > > -                             page_hstate(compound_head(page)),
> > > > > -                             preferred_nid, nodemask, 0, false);
> > > > > +                             page_hstate(compound_head(page)), mtc->nid,
> > > > > +                             mtc->nmask, gfp_mask, false);
> > > > >       }
> > > > >
> > > > >       if (PageTransHuge(page)) {
> > > > > +             gfp_mask &= ~__GFP_RECLAIM;
> > > >
> > > > What's up with this gfp_mask modification?
> > >
> > > THP page allocation uses the standard gfp masks GFP_TRANSHUGE_LIGHT and
> > > GFP_TRANSHUGE. The __GFP_RECLAIM flags are a big part of this standard
> > > mask design, so I clear them here so as not to disrupt the THP gfp mask.
> >
> > Why wasn't this needed before? I guess I must be missing something
> > here. This patch should be a mostly mechanical convergence of the
> > existing migration callbacks, but this change adds new behavior AFAICS.
> 
> Before this patch, a user cannot specify a gfp_mask, and THP allocation
> statically uses GFP_TRANSHUGE.

Unless I am misreading, there are code paths (e.g. new_page_nodemask)
which simply add GFP_TRANSHUGE to GFP_USER | __GFP_MOVABLE |
__GFP_RETRY_MAYFAIL, and this goes all the way back to the introduction
of THP migration.
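
The pre-series code being referred to, paraphrased from the "-" lines of
new_page_nodemask() in the diff below:

	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

	if (PageTransHuge(page)) {
		/* No clearing here: __GFP_KSWAPD_RECLAIM inherited from
		 * GFP_USER stays set on top of GFP_TRANSHUGE. */
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}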

> After this patch, a user can specify a gfp_mask and it could conflict
> with GFP_TRANSHUGE. This code tries to avoid that conflict.
> 
> > It would effectively drop __GFP_RETRY_MAYFAIL and __GFP_KSWAPD_RECLAIM
> 
> __GFP_RETRY_MAYFAIL isn't dropped. __GFP_RECLAIM is
> "___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM", so only
> __GFP_KSWAPD_RECLAIM would be dropped for THP allocation.
> IIUC, THP allocation doesn't use __GFP_KSWAPD_RECLAIM since its
> overhead is too large and should be borne by the caller rather than by
> a system thread (kswapd).

Yes, there is a reason why kswapd is excluded from THP allocations in
the page fault path. Maybe we want to extend that behavior to the
migration path as well. I do not have a strong opinion on that because
I haven't seen excessive kswapd reclaim due to THP migrations. They are
likely too rare.

But as I've said in my previous email, make this a separate patch with
an explanation of why we want it.
Joonsoo Kim June 30, 2020, 7:19 a.m. UTC | #6
On Mon, Jun 29, 2020 at 5:03 PM Michal Hocko <mhocko@kernel.org> wrote:
>
> On Mon 29-06-20 15:41:37, Joonsoo Kim wrote:
> > On Fri, Jun 26, 2020 at 4:33 PM Michal Hocko <mhocko@kernel.org> wrote:
> > >
> > > On Fri 26-06-20 14:02:49, Joonsoo Kim wrote:
> > > > On Thu, Jun 25, 2020 at 9:05 PM Michal Hocko <mhocko@kernel.org> wrote:
> > > > >
> > > > > On Tue 23-06-20 15:13:45, Joonsoo Kim wrote:
> > > [...]
> > > > > > -struct page *new_page_nodemask(struct page *page,
> > > > > > -                             int preferred_nid, nodemask_t *nodemask)
> > > > > > +struct page *alloc_migration_target(struct page *page, unsigned long private)
> > > > > >  {
> > > > > > -     gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
> > > > > > +     struct migration_target_control *mtc;
> > > > > > +     gfp_t gfp_mask;
> > > > > >       unsigned int order = 0;
> > > > > >       struct page *new_page = NULL;
> > > > > > +     int zidx;
> > > > > > +
> > > > > > +     mtc = (struct migration_target_control *)private;
> > > > > > +     gfp_mask = mtc->gfp_mask;
> > > > > >
> > > > > >       if (PageHuge(page)) {
> > > > > >               return alloc_huge_page_nodemask(
> > > > > > -                             page_hstate(compound_head(page)),
> > > > > > -                             preferred_nid, nodemask, 0, false);
> > > > > > +                             page_hstate(compound_head(page)), mtc->nid,
> > > > > > +                             mtc->nmask, gfp_mask, false);
> > > > > >       }
> > > > > >
> > > > > >       if (PageTransHuge(page)) {
> > > > > > +             gfp_mask &= ~__GFP_RECLAIM;
> > > > >
> > > > > What's up with this gfp_mask modification?
> > > >
> > > > THP page allocation uses the standard gfp masks GFP_TRANSHUGE_LIGHT and
> > > > GFP_TRANSHUGE. The __GFP_RECLAIM flags are a big part of this standard
> > > > mask design, so I clear them here so as not to disrupt the THP gfp mask.
> > >
> > > Why wasn't this needed before? I guess I must be missing something
> > > here. This patch should be a mostly mechanical convergence of the
> > > existing migration callbacks, but this change adds new behavior AFAICS.
> >
> > Before this patch, a user cannot specify a gfp_mask, and THP allocation
> > statically uses GFP_TRANSHUGE.
>
> Unless I am misreading, there are code paths (e.g. new_page_nodemask)
> which simply add GFP_TRANSHUGE to GFP_USER | __GFP_MOVABLE |
> __GFP_RETRY_MAYFAIL, and this goes all the way back to the introduction
> of THP migration.

Ahh... Indeed, I missed that. There are multiple THP migration target
allocation functions, and some of them use GFP_TRANSHUGE plus an extra
mask, so they don't include __GFP_KSWAPD_RECLAIM, but the others do
include __GFP_KSWAPD_RECLAIM due to the original GFP_USER.
Thanks for clarifying.

> > After this patch, a user can specify a gfp_mask and it could conflict
> > with GFP_TRANSHUGE. This code tries to avoid that conflict.
> >
> > > It would effectively drop __GFP_RETRY_MAYFAIL and __GFP_KSWAPD_RECLAIM
> >
> > __GFP_RETRY_MAYFAIL isn't dropped. __GFP_RECLAIM is
> > "___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM", so only
> > __GFP_KSWAPD_RECLAIM would be dropped for THP allocation.
> > IIUC, THP allocation doesn't use __GFP_KSWAPD_RECLAIM since its
> > overhead is too large and should be borne by the caller rather than by
> > a system thread (kswapd).
>
> Yes, there is a reason why kswapd is excluded from THP allocations in
> the page fault path. Maybe we want to extend that behavior to the
> migration path as well. I do not have a strong opinion on that because
> I haven't seen excessive kswapd reclaim due to THP migrations. They are
> likely too rare.
>
> But as I've said in my previous email, make this a separate patch with
> an explanation of why we want it.

Okay. I will make a separate patch that clears __GFP_RECLAIM from the
passed gfp_mask to extend that behavior. It will make the THP migration
target allocation consistent. :)
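
The follow-up patch described here would presumably reduce to a hunk of
this shape (a hypothetical sketch, not the actually posted patch):

	 	if (PageTransHuge(page)) {
	+		/*
	+		 * Clear __GFP_RECLAIM so that a caller-supplied mask
	+		 * cannot re-introduce __GFP_KSWAPD_RECLAIM on top of
	+		 * the standard THP gfp mask.
	+		 */
	+		gfp_mask &= ~__GFP_RECLAIM;
	 		gfp_mask |= GFP_TRANSHUGE;
	 		order = HPAGE_PMD_ORDER;
	 	}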

Thanks.
Vlastimil Babka July 3, 2020, 3:25 p.m. UTC | #7
On 6/23/20 8:13 AM, js1304@gmail.com wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There are some similar functions for migration target allocation. Since
> there is no fundamental difference, it's better to keep just one rather
> than keeping all the variants. This patch implements a base migration
> target allocation function. In the following patches, the variants will
> be converted to use this function.
> 
> Note that the PageHighMem() call in the previous function is changed to
> an open-coded "is_highmem_idx()" check, since it is more readable.
> 
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Provided that the "&= ~__GFP_RECLAIM" line is split into a separate patch as you discussed,

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 1d70b4a..5e9c866 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@ 
 typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
+struct migration_target_control;
+
 /*
  * Return values from addresss_space_operations.migratepage():
  * - negative errno on page migration failure;
@@ -39,8 +41,7 @@  extern int migrate_page(struct address_space *mapping,
 			enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
-extern struct page *new_page_nodemask(struct page *page,
-		int preferred_nid, nodemask_t *nodemask);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
diff --git a/mm/internal.h b/mm/internal.h
index 42cf0b6..f725aa8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -614,4 +614,11 @@  static inline bool is_migrate_highatomic_page(struct page *page)
 
 void setup_zone_pageset(struct zone *zone);
 extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+struct migration_target_control {
+	int nid;		/* preferred node id */
+	nodemask_t *nmask;
+	gfp_t gfp_mask;
+};
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 47b8ccb..820ea5e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1648,9 +1648,13 @@  EXPORT_SYMBOL(unpoison_memory);
 
 static struct page *new_page(struct page *p, unsigned long private)
 {
-	int nid = page_to_nid(p);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(p),
+		.nmask = &node_states[N_MEMORY],
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(p, (unsigned long)&mtc);
 }
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index be3c62e3..d2b65a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1259,19 +1259,23 @@  static int scan_movable_pages(unsigned long start, unsigned long end,
 
 static struct page *new_node_page(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.nmask = &nmask,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
 	/*
 	 * try to allocate from a different node but reuse this node if there
 	 * are no other online nodes to be used (e.g. we are offlining a part
 	 * of the only existing node)
 	 */
-	node_clear(nid, nmask);
-	if (nodes_empty(nmask))
-		node_set(nid, nmask);
+	node_clear(mtc.nid, *mtc.nmask);
+	if (nodes_empty(*mtc.nmask))
+		node_set(mtc.nid, *mtc.nmask);
 
-	return new_page_nodemask(page, nid, &nmask);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }
 
 static int
diff --git a/mm/migrate.c b/mm/migrate.c
index 634f1ea..3afff59 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1536,29 +1536,34 @@  int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	return rc;
 }
 
-struct page *new_page_nodemask(struct page *page,
-				int preferred_nid, nodemask_t *nodemask)
+struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+	struct migration_target_control *mtc;
+	gfp_t gfp_mask;
 	unsigned int order = 0;
 	struct page *new_page = NULL;
+	int zidx;
+
+	mtc = (struct migration_target_control *)private;
+	gfp_mask = mtc->gfp_mask;
 
 	if (PageHuge(page)) {
 		return alloc_huge_page_nodemask(
-				page_hstate(compound_head(page)),
-				preferred_nid, nodemask, 0, false);
+				page_hstate(compound_head(page)), mtc->nid,
+				mtc->nmask, gfp_mask, false);
 	}
 
 	if (PageTransHuge(page)) {
+		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
 		order = HPAGE_PMD_ORDER;
 	}
-
-	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+	zidx = zone_idx(page_zone(page));
+	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
 	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
+				mtc->nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index aec26d9..adba031 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -309,7 +309,11 @@  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 
 struct page *alloc_migrate_target(struct page *page, unsigned long private)
 {
-	int nid = page_to_nid(page);
+	struct migration_target_control mtc = {
+		.nid = page_to_nid(page),
+		.nmask = &node_states[N_MEMORY],
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+	};
 
-	return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
+	return alloc_migration_target(page, (unsigned long)&mtc);
 }