[1/6] mm: migrate: add isolate_movable_folio()

Message ID 20240327141034.3712697-2-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series mm: remove isolate_lru_page() and isolate_movable_page()

Commit Message

Kefeng Wang March 27, 2024, 2:10 p.m. UTC
Like isolate_lru_page(), make isolate_movable_page() a wrapper
around isolate_movable_folio(). Since isolate_movable_page() always
fails on a tail page, add a warning for tail pages and return immediately.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/migrate.h |  3 +++
 mm/migrate.c            | 41 +++++++++++++++++++++++------------------
 2 files changed, 26 insertions(+), 18 deletions(-)

Comments

Zi Yan March 27, 2024, 2:29 p.m. UTC | #1
On 27 Mar 2024, at 10:10, Kefeng Wang wrote:

> Like isolate_lru_page(), make isolate_movable_page() a wrapper
> around isolate_movable_folio(). Since isolate_movable_page() always
> fails on a tail page, add a warning for tail pages and return immediately.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  include/linux/migrate.h |  3 +++
>  mm/migrate.c            | 41 +++++++++++++++++++++++------------------
>  2 files changed, 26 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index f9d92482d117..a6c38ee7246a 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>  		  unsigned int *ret_succeeded);
>  struct folio *alloc_migration_target(struct folio *src, unsigned long private);
>  bool isolate_movable_page(struct page *page, isolate_mode_t mode);
> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode);
>
>  int migrate_huge_page_move_mapping(struct address_space *mapping,
>  		struct folio *dst, struct folio *src);
> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
>  	{ return NULL; }
>  static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	{ return false; }
> +static inline bool isolate_movable_folio(struct page *page, isolate_mode_t mode)
> +	{ return false; }
>
>  static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>  				  struct folio *dst, struct folio *src)
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 2228ca681afb..b2195b6ff32c 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -57,31 +57,29 @@
>
>  #include "internal.h"
>
> -bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
>  {
> -	struct folio *folio = folio_get_nontail_page(page);
>  	const struct movable_operations *mops;
>
>  	/*
> -	 * Avoid burning cycles with pages that are yet under __free_pages(),
> +	 * Avoid burning cycles with folios that are yet under __free_pages(),
>  	 * or just got freed under us.
>  	 *
> -	 * In case we 'win' a race for a movable page being freed under us and
> +	 * In case we 'win' a race for a movable folio being freed under us and
>  	 * raise its refcount preventing __free_pages() from doing its job
> -	 * the put_page() at the end of this block will take care of
> -	 * release this page, thus avoiding a nasty leakage.
> +	 * the folio_put() at the end of this block will take care of
> +	 * release this folio, thus avoiding a nasty leakage.
>  	 */
> -	if (!folio)
> -		goto out;
> +	folio_get(folio);

You need folio_try_get() instead, since folio_get_nontail_page() calls
get_page_unless_zero() first.
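
Something like this (untested) should keep the old semantics, where we
only take a reference if the folio is not already being freed:

	/* Bail out if the folio's refcount already dropped to zero. */
	if (unlikely(!folio_try_get(folio)))
		return false;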

>
>  	if (unlikely(folio_test_slab(folio)))
>  		goto out_putfolio;
>  	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
>  	smp_rmb();
>  	/*
> -	 * Check movable flag before taking the page lock because
> -	 * we use non-atomic bitops on newly allocated page flags so
> -	 * unconditionally grabbing the lock ruins page's owner side.
> +	 * Check movable flag before taking the folio lock because
> +	 * we use non-atomic bitops on newly allocated folio flags so
> +	 * unconditionally grabbing the lock ruins folio's owner side.
>  	 */
>  	if (unlikely(!__folio_test_movable(folio)))
>  		goto out_putfolio;
> @@ -91,13 +89,13 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  		goto out_putfolio;
>
>  	/*
> -	 * As movable pages are not isolated from LRU lists, concurrent
> -	 * compaction threads can race against page migration functions
> -	 * as well as race against the releasing a page.
> +	 * As movable folios are not isolated from LRU lists, concurrent
> +	 * compaction threads can race against folio migration functions
> +	 * as well as race against the releasing a folio.
>  	 *
> -	 * In order to avoid having an already isolated movable page
> +	 * In order to avoid having an already isolated movable folio
>  	 * being (wrongly) re-isolated while it is under migration,
> -	 * or to avoid attempting to isolate pages being released,
> +	 * or to avoid attempting to isolate folios being released,
>  	 * lets be sure we have the page lock
>  	 * before proceeding with the movable page isolation steps.
>  	 */
> @@ -113,7 +111,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	if (!mops->isolate_page(&folio->page, mode))
>  		goto out_no_isolated;
>
> -	/* Driver shouldn't use PG_isolated bit of page->flags */
> +	/* Driver shouldn't use PG_isolated bit of folio->flags */
>  	WARN_ON_ONCE(folio_test_isolated(folio));
>  	folio_set_isolated(folio);
>  	folio_unlock(folio);
> @@ -124,10 +122,17 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	folio_unlock(folio);
>  out_putfolio:
>  	folio_put(folio);
> -out:
>  	return false;
>  }
>
> +bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +{
> +	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
> +		return false;

Why bother adding a warning here? There was no warning before. Also,
after this series, isolate_movable_page() will be gone.

> +
> +	return isolate_movable_folio((struct folio *)page, mode);
> +}
> +
>  static void putback_movable_folio(struct folio *folio)
>  {
>  	const struct movable_operations *mops = folio_movable_ops(folio);
> -- 
> 2.27.0


--
Best Regards,
Yan, Zi
Kefeng Wang March 27, 2024, 2:36 p.m. UTC | #2
On 2024/3/27 22:29, Zi Yan wrote:
> On 27 Mar 2024, at 10:10, Kefeng Wang wrote:
> 
>> Like isolate_lru_page(), make isolate_movable_page() a wrapper
>> around isolate_movable_folio(). Since isolate_movable_page() always
>> fails on a tail page, add a warning for tail pages and return immediately.
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>>   include/linux/migrate.h |  3 +++
>>   mm/migrate.c            | 41 +++++++++++++++++++++++------------------
>>   2 files changed, 26 insertions(+), 18 deletions(-)
>>
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index f9d92482d117..a6c38ee7246a 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>>   		  unsigned int *ret_succeeded);
>>   struct folio *alloc_migration_target(struct folio *src, unsigned long private);
>>   bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode);
>>
>>   int migrate_huge_page_move_mapping(struct address_space *mapping,
>>   		struct folio *dst, struct folio *src);
>> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
>>   	{ return NULL; }
>>   static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	{ return false; }
>> +static inline bool isolate_movable_folio(struct page *page, isolate_mode_t mode)
>> +	{ return false; }
>>
>>   static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>>   				  struct folio *dst, struct folio *src)
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 2228ca681afb..b2195b6ff32c 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -57,31 +57,29 @@
>>
>>   #include "internal.h"
>>
>> -bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
>>   {
>> -	struct folio *folio = folio_get_nontail_page(page);
>>   	const struct movable_operations *mops;
>>
>>   	/*
>> -	 * Avoid burning cycles with pages that are yet under __free_pages(),
>> +	 * Avoid burning cycles with folios that are yet under __free_pages(),
>>   	 * or just got freed under us.
>>   	 *
>> -	 * In case we 'win' a race for a movable page being freed under us and
>> +	 * In case we 'win' a race for a movable folio being freed under us and
>>   	 * raise its refcount preventing __free_pages() from doing its job
>> -	 * the put_page() at the end of this block will take care of
>> -	 * release this page, thus avoiding a nasty leakage.
>> +	 * the folio_put() at the end of this block will take care of
>> +	 * release this folio, thus avoiding a nasty leakage.
>>   	 */
>> -	if (!folio)
>> -		goto out;
>> +	folio_get(folio);
> 
> You need folio_try_get() instead, since folio_get_nontail_page() calls
> get_page_unless_zero() first.
Oh, indeed, will fix.
> 
>>
>>   	if (unlikely(folio_test_slab(folio)))
>>   		goto out_putfolio;
>>   	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
>>   	smp_rmb();
>>   	/*
>> -	 * Check movable flag before taking the page lock because
>> -	 * we use non-atomic bitops on newly allocated page flags so
>> -	 * unconditionally grabbing the lock ruins page's owner side.
>> +	 * Check movable flag before taking the folio lock because
>> +	 * we use non-atomic bitops on newly allocated folio flags so
>> +	 * unconditionally grabbing the lock ruins folio's owner side.
>>   	 */
>>   	if (unlikely(!__folio_test_movable(folio)))
>>   		goto out_putfolio;
>> @@ -91,13 +89,13 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   		goto out_putfolio;
>>
>>   	/*
>> -	 * As movable pages are not isolated from LRU lists, concurrent
>> -	 * compaction threads can race against page migration functions
>> -	 * as well as race against the releasing a page.
>> +	 * As movable folios are not isolated from LRU lists, concurrent
>> +	 * compaction threads can race against folio migration functions
>> +	 * as well as race against the releasing a folio.
>>   	 *
>> -	 * In order to avoid having an already isolated movable page
>> +	 * In order to avoid having an already isolated movable folio
>>   	 * being (wrongly) re-isolated while it is under migration,
>> -	 * or to avoid attempting to isolate pages being released,
>> +	 * or to avoid attempting to isolate folios being released,
>>   	 * lets be sure we have the page lock
>>   	 * before proceeding with the movable page isolation steps.
>>   	 */
>> @@ -113,7 +111,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	if (!mops->isolate_page(&folio->page, mode))
>>   		goto out_no_isolated;
>>
>> -	/* Driver shouldn't use PG_isolated bit of page->flags */
>> +	/* Driver shouldn't use PG_isolated bit of folio->flags */
>>   	WARN_ON_ONCE(folio_test_isolated(folio));
>>   	folio_set_isolated(folio);
>>   	folio_unlock(folio);
>> @@ -124,10 +122,17 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	folio_unlock(folio);
>>   out_putfolio:
>>   	folio_put(folio);
>> -out:
>>   	return false;
>>   }
>>
>> +bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>> +{
>> +	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
>> +		return false;
> 
> Why bother adding a warning here? There was no warning before. Also,
> after this series, isolate_movable_page() will be gone.

I copied it from isolate_lru_page(), but as you said, it seems useless;
I will remove it.

Thanks.
> 
>> +
>> +	return isolate_movable_folio((struct folio *)page, mode);
>> +}
>> +
>>   static void putback_movable_folio(struct folio *folio)
>>   {
>>   	const struct movable_operations *mops = folio_movable_ops(folio);
>> -- 
>> 2.27.0
> 
> 
> --
> Best Regards,
> Yan, Zi
Vishal Moola March 27, 2024, 6:59 p.m. UTC | #3
On Wed, Mar 27, 2024 at 10:10:29PM +0800, Kefeng Wang wrote:
> Like isolate_lru_page(), make isolate_movable_page() a wrapper
> around isolate_movable_folio(). Since isolate_movable_page() always
> fails on a tail page, add a warning for tail pages and return immediately.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  include/linux/migrate.h |  3 +++
>  mm/migrate.c            | 41 +++++++++++++++++++++++------------------
>  2 files changed, 26 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index f9d92482d117..a6c38ee7246a 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>  		  unsigned int *ret_succeeded);
>  struct folio *alloc_migration_target(struct folio *src, unsigned long private);
>  bool isolate_movable_page(struct page *page, isolate_mode_t mode);
> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode);
>  
>  int migrate_huge_page_move_mapping(struct address_space *mapping,
>  		struct folio *dst, struct folio *src);
> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
>  	{ return NULL; }
>  static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	{ return false; }
> +static inline bool isolate_movable_folio(struct page *page, isolate_mode_t mode)
> +	{ return false; }

Wrong argument here.
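
Presumably the stub should take a folio, matching the declaration above:

static inline bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
	{ return false; }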

>  
>  static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>  				  struct folio *dst, struct folio *src)
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 2228ca681afb..b2195b6ff32c 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -57,31 +57,29 @@
>  
>  #include "internal.h"
>  
> -bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
>  {
> -	struct folio *folio = folio_get_nontail_page(page);
>  	const struct movable_operations *mops;
>  
>  	/*
> -	 * Avoid burning cycles with pages that are yet under __free_pages(),
> +	 * Avoid burning cycles with folios that are yet under __free_pages(),
>  	 * or just got freed under us.
>  	 *
> -	 * In case we 'win' a race for a movable page being freed under us and
> +	 * In case we 'win' a race for a movable folio being freed under us and
>  	 * raise its refcount preventing __free_pages() from doing its job
> -	 * the put_page() at the end of this block will take care of
> -	 * release this page, thus avoiding a nasty leakage.
> +	 * the folio_put() at the end of this block will take care of
> +	 * release this folio, thus avoiding a nasty leakage.
>  	 */
> -	if (!folio)
> -		goto out;
> +	folio_get(folio);
>  
>  	if (unlikely(folio_test_slab(folio)))
>  		goto out_putfolio;
>  	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
>  	smp_rmb();
>  	/*
> -	 * Check movable flag before taking the page lock because
> -	 * we use non-atomic bitops on newly allocated page flags so
> -	 * unconditionally grabbing the lock ruins page's owner side.
> +	 * Check movable flag before taking the folio lock because
> +	 * we use non-atomic bitops on newly allocated folio flags so
> +	 * unconditionally grabbing the lock ruins folio's owner side.
>  	 */
>  	if (unlikely(!__folio_test_movable(folio)))
>  		goto out_putfolio;
> @@ -91,13 +89,13 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  		goto out_putfolio;
>  
>  	/*
> -	 * As movable pages are not isolated from LRU lists, concurrent
> -	 * compaction threads can race against page migration functions
> -	 * as well as race against the releasing a page.
> +	 * As movable folios are not isolated from LRU lists, concurrent
> +	 * compaction threads can race against folio migration functions
> +	 * as well as race against the releasing a folio.
>  	 *
> -	 * In order to avoid having an already isolated movable page
> +	 * In order to avoid having an already isolated movable folio
>  	 * being (wrongly) re-isolated while it is under migration,
> -	 * or to avoid attempting to isolate pages being released,
> +	 * or to avoid attempting to isolate folios being released,
>  	 * lets be sure we have the page lock
>  	 * before proceeding with the movable page isolation steps.
>  	 */
> @@ -113,7 +111,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	if (!mops->isolate_page(&folio->page, mode))
>  		goto out_no_isolated;
>  
> -	/* Driver shouldn't use PG_isolated bit of page->flags */
> +	/* Driver shouldn't use PG_isolated bit of folio->flags */
>  	WARN_ON_ONCE(folio_test_isolated(folio));
>  	folio_set_isolated(folio);
>  	folio_unlock(folio);
> @@ -124,10 +122,17 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>  	folio_unlock(folio);
>  out_putfolio:
>  	folio_put(folio);
> -out:
>  	return false;
>  }
>  
> +bool isolate_movable_page(struct page *page, isolate_mode_t mode)
> +{
> +	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
> +		return false;

This warning doesn't make sense. As of now, we still expect
isolate_movable_page() to be able to take in a tail page; we just don't
want to operate on it.
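
If the warning is dropped, the wrapper could simply keep the old
silently-fail behaviour for tail pages, e.g. (untested):

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	/* folio_get_nontail_page() used to reject tail pages silently. */
	if (PageTail(page))
		return false;

	return isolate_movable_folio((struct folio *)page, mode);
}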

> +	return isolate_movable_folio((struct folio *)page, mode);
> +}
> +
>  static void putback_movable_folio(struct folio *folio)
>  {
>  	const struct movable_operations *mops = folio_movable_ops(folio);
> -- 
> 2.27.0
>
Kefeng Wang March 28, 2024, 5:08 a.m. UTC | #4
On 2024/3/28 2:59, Vishal Moola wrote:
> On Wed, Mar 27, 2024 at 10:10:29PM +0800, Kefeng Wang wrote:
>> Like isolate_lru_page(), make isolate_movable_page() a wrapper
>> around isolate_movable_folio(). Since isolate_movable_page() always
>> fails on a tail page, add a warning for tail pages and return immediately.
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>>   include/linux/migrate.h |  3 +++
>>   mm/migrate.c            | 41 +++++++++++++++++++++++------------------
>>   2 files changed, 26 insertions(+), 18 deletions(-)
>>
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index f9d92482d117..a6c38ee7246a 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
>>   		  unsigned int *ret_succeeded);
>>   struct folio *alloc_migration_target(struct folio *src, unsigned long private);
>>   bool isolate_movable_page(struct page *page, isolate_mode_t mode);
>> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode);
>>   
>>   int migrate_huge_page_move_mapping(struct address_space *mapping,
>>   		struct folio *dst, struct folio *src);
>> @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
>>   	{ return NULL; }
>>   static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	{ return false; }
>> +static inline bool isolate_movable_folio(struct page *page, isolate_mode_t mode)
>> +	{ return false; }
> 
> Wrong argument here.
Mistake, will fix.

> 
>>   
>>   static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>>   				  struct folio *dst, struct folio *src)
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 2228ca681afb..b2195b6ff32c 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -57,31 +57,29 @@
>>   
>>   #include "internal.h"
>>   
>> -bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>> +bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
>>   {
>> -	struct folio *folio = folio_get_nontail_page(page);
>>   	const struct movable_operations *mops;
>>   
>>   	/*
>> -	 * Avoid burning cycles with pages that are yet under __free_pages(),
>> +	 * Avoid burning cycles with folios that are yet under __free_pages(),
>>   	 * or just got freed under us.
>>   	 *
>> -	 * In case we 'win' a race for a movable page being freed under us and
>> +	 * In case we 'win' a race for a movable folio being freed under us and
>>   	 * raise its refcount preventing __free_pages() from doing its job
>> -	 * the put_page() at the end of this block will take care of
>> -	 * release this page, thus avoiding a nasty leakage.
>> +	 * the folio_put() at the end of this block will take care of
>> +	 * release this folio, thus avoiding a nasty leakage.
>>   	 */
>> -	if (!folio)
>> -		goto out;
>> +	folio_get(folio);
>>   
>>   	if (unlikely(folio_test_slab(folio)))
>>   		goto out_putfolio;
>>   	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
>>   	smp_rmb();
>>   	/*
>> -	 * Check movable flag before taking the page lock because
>> -	 * we use non-atomic bitops on newly allocated page flags so
>> -	 * unconditionally grabbing the lock ruins page's owner side.
>> +	 * Check movable flag before taking the folio lock because
>> +	 * we use non-atomic bitops on newly allocated folio flags so
>> +	 * unconditionally grabbing the lock ruins folio's owner side.
>>   	 */
>>   	if (unlikely(!__folio_test_movable(folio)))
>>   		goto out_putfolio;
>> @@ -91,13 +89,13 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   		goto out_putfolio;
>>   
>>   	/*
>> -	 * As movable pages are not isolated from LRU lists, concurrent
>> -	 * compaction threads can race against page migration functions
>> -	 * as well as race against the releasing a page.
>> +	 * As movable folios are not isolated from LRU lists, concurrent
>> +	 * compaction threads can race against folio migration functions
>> +	 * as well as race against the releasing a folio.
>>   	 *
>> -	 * In order to avoid having an already isolated movable page
>> +	 * In order to avoid having an already isolated movable folio
>>   	 * being (wrongly) re-isolated while it is under migration,
>> -	 * or to avoid attempting to isolate pages being released,
>> +	 * or to avoid attempting to isolate folios being released,
>>   	 * lets be sure we have the page lock
>>   	 * before proceeding with the movable page isolation steps.
>>   	 */
>> @@ -113,7 +111,7 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	if (!mops->isolate_page(&folio->page, mode))
>>   		goto out_no_isolated;
>>   
>> -	/* Driver shouldn't use PG_isolated bit of page->flags */
>> +	/* Driver shouldn't use PG_isolated bit of folio->flags */
>>   	WARN_ON_ONCE(folio_test_isolated(folio));
>>   	folio_set_isolated(folio);
>>   	folio_unlock(folio);
>> @@ -124,10 +122,17 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>>   	folio_unlock(folio);
>>   out_putfolio:
>>   	folio_put(folio);
>> -out:
>>   	return false;
>>   }
>>   
>> +bool isolate_movable_page(struct page *page, isolate_mode_t mode)
>> +{
>> +	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
>> +		return false;
> 
> This warning doesn't make sense. As of now, we still expect
> isolate_movable_page() to be able to take in a tail page; we just don't
> want to operate on it.
Zi replied too; I will remove it.

Thanks.
> 
>> +	return isolate_movable_folio((struct folio *)page, mode);
>> +}
>> +
>>   static void putback_movable_folio(struct folio *folio)
>>   {
>>   	const struct movable_operations *mops = folio_movable_ops(folio);
>> -- 
>> 2.27.0
>>

Patch

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f9d92482d117..a6c38ee7246a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@  int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
 		  unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 		struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@  static inline struct folio *alloc_migration_target(struct folio *src,
 	{ return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	{ return false; }
+static inline bool isolate_movable_folio(struct page *page, isolate_mode_t mode)
+	{ return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct folio *dst, struct folio *src)
diff --git a/mm/migrate.c b/mm/migrate.c
index 2228ca681afb..b2195b6ff32c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -57,31 +57,29 @@ 
 
 #include "internal.h"
 
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_folio(struct folio *folio, isolate_mode_t mode)
 {
-	struct folio *folio = folio_get_nontail_page(page);
 	const struct movable_operations *mops;
 
 	/*
-	 * Avoid burning cycles with pages that are yet under __free_pages(),
+	 * Avoid burning cycles with folios that are yet under __free_pages(),
 	 * or just got freed under us.
 	 *
-	 * In case we 'win' a race for a movable page being freed under us and
+	 * In case we 'win' a race for a movable folio being freed under us and
 	 * raise its refcount preventing __free_pages() from doing its job
-	 * the put_page() at the end of this block will take care of
-	 * release this page, thus avoiding a nasty leakage.
+	 * the folio_put() at the end of this block will take care of
+	 * release this folio, thus avoiding a nasty leakage.
 	 */
-	if (!folio)
-		goto out;
+	folio_get(folio);
 
 	if (unlikely(folio_test_slab(folio)))
 		goto out_putfolio;
 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
 	smp_rmb();
 	/*
-	 * Check movable flag before taking the page lock because
-	 * we use non-atomic bitops on newly allocated page flags so
-	 * unconditionally grabbing the lock ruins page's owner side.
+	 * Check movable flag before taking the folio lock because
+	 * we use non-atomic bitops on newly allocated folio flags so
+	 * unconditionally grabbing the lock ruins folio's owner side.
 	 */
 	if (unlikely(!__folio_test_movable(folio)))
 		goto out_putfolio;
@@ -91,13 +89,13 @@  bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 		goto out_putfolio;
 
 	/*
-	 * As movable pages are not isolated from LRU lists, concurrent
-	 * compaction threads can race against page migration functions
-	 * as well as race against the releasing a page.
+	 * As movable folios are not isolated from LRU lists, concurrent
+	 * compaction threads can race against folio migration functions
+	 * as well as race against the releasing a folio.
 	 *
-	 * In order to avoid having an already isolated movable page
+	 * In order to avoid having an already isolated movable folio
 	 * being (wrongly) re-isolated while it is under migration,
-	 * or to avoid attempting to isolate pages being released,
+	 * or to avoid attempting to isolate folios being released,
 	 * lets be sure we have the page lock
 	 * before proceeding with the movable page isolation steps.
 	 */
@@ -113,7 +111,7 @@  bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (!mops->isolate_page(&folio->page, mode))
 		goto out_no_isolated;
 
-	/* Driver shouldn't use PG_isolated bit of page->flags */
+	/* Driver shouldn't use PG_isolated bit of folio->flags */
 	WARN_ON_ONCE(folio_test_isolated(folio));
 	folio_set_isolated(folio);
 	folio_unlock(folio);
@@ -124,10 +122,17 @@  bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	folio_unlock(folio);
 out_putfolio:
 	folio_put(folio);
-out:
 	return false;
 }
 
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
+		return false;
+
+	return isolate_movable_folio((struct folio *)page, mode);
+}
+
 static void putback_movable_folio(struct folio *folio)
 {
 	const struct movable_operations *mops = folio_movable_ops(folio);