diff mbox series

mm: migrate LRU_REFS_MASK bits in folio_migrate_flags

Message ID 20240925030225.236143-1-zhaoyang.huang@unisoc.com (mailing list archive)
State New
Headers show
Series mm: migrate LRU_REFS_MASK bits in folio_migrate_flags | expand

Commit Message

zhaoyang.huang Sept. 25, 2024, 3:02 a.m. UTC
From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

Bits of LRU_REFS_MASK are not inherited during migration, which leads to
the new_folio starting from tier 0. Fix this by migrating these bits to
the new folio.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
 include/linux/mm_inline.h | 8 ++++++++
 mm/migrate.c              | 1 +
 2 files changed, 9 insertions(+)

Comments

Andrew Morton Sept. 25, 2024, 9:42 a.m. UTC | #1
On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:

> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> 
> Bits of LRU_REFS_MASK are not inherited during migration, which leads to
> the new_folio starting from tier 0. Fix this by migrating these bits to
> the new folio.

I'm having trouble understanding this, sorry.  Please more fully
describe the runtime effects of this flaw.

> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
>  	return true;
>  }
>  
> +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> +{
> +	unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> +
> +	set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> +}
>  #else /* !CONFIG_LRU_GEN */
>  
>  static inline bool lru_gen_enabled(void)
> @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
>  	return false;
>  }
>  
> +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> +{}
>  #endif /* CONFIG_LRU_GEN */
>  
>  static __always_inline
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 923ea80ba744..60c97e235ae7 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>  	if (folio_test_idle(folio))
>  		folio_set_idle(newfolio);
>  
> +	folio_migrate_refs(newfolio, folio);
>  	/*
>  	 * Copy NUMA information to the new page, to prevent over-eager
>  	 * future migrations of this same page.
> -- 
> 2.25.1
Zhaoyang Huang Sept. 25, 2024, 11:49 a.m. UTC | #2
On Wed, Sep 25, 2024 at 5:42 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:
>
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > Bits of LRU_REFS_MASK are not inherited during migration, which leads to
> > the new_folio starting from tier 0. Fix this by migrating these bits to
> > the new folio.
>
> I'm having trouble understanding this, sorry.  Please more fully
> describe the runtime effects of this flaw.
Sorry for the confusion. According to my understanding, MGLRU
records how many times the VFS has accessed this page in a range of
bits (LRU_REFS_MASK) in folio->flags, and these bits are currently not
migrated to the new folio. This commit makes the new_folio inherit
these bits from the old folio. Is this correct and worthwhile to do?
>
> > --- a/include/linux/mm_inline.h
> > +++ b/include/linux/mm_inline.h
> > @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> >       return true;
> >  }
> >
> > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> > +{
> > +     unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> > +
> > +     set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> > +}
> >  #else /* !CONFIG_LRU_GEN */
> >
> >  static inline bool lru_gen_enabled(void)
> > @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> >       return false;
> >  }
> >
> > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> > +{}
> >  #endif /* CONFIG_LRU_GEN */
> >
> >  static __always_inline
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index 923ea80ba744..60c97e235ae7 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
> >       if (folio_test_idle(folio))
> >               folio_set_idle(newfolio);
> >
> > +     folio_migrate_refs(newfolio, folio);
> >       /*
> >        * Copy NUMA information to the new page, to prevent over-eager
> >        * future migrations of this same page.
> > --
> > 2.25.1
Yu Zhao Sept. 25, 2024, 9:22 p.m. UTC | #3
On Wed, Sep 25, 2024 at 5:50 AM Zhaoyang Huang <huangzhaoyang@gmail.com> wrote:
>
> On Wed, Sep 25, 2024 at 5:42 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> > On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:
> >
> > > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> > >
> > > Bits of LRU_REFS_MASK are not inherited during migration, which leads to
> > > the new_folio starting from tier 0. Fix this by migrating these bits to
> > > the new folio.
> >
> > I'm having trouble understanding this, sorry.  Please more fully
> > describe the runtime effects of this flaw.
> Sorry for the confusion. According to my understanding, MGLRU
> records how many times the VFS has accessed this page in a range of
> bits (LRU_REFS_MASK) in folio->flags, and these bits are currently not
> migrated to the new folio. This commit makes the new_folio inherit
> these bits from the old folio. Is this correct and worthwhile to do?

Correct. And this was not done because
1. While LRU_REFS_MASK is useful, it's not important, and
2. More important information, e.g., generations, is lost during migration.

So I'm not sure how much this patch can help. But if you think it does, why not.

> > > --- a/include/linux/mm_inline.h
> > > +++ b/include/linux/mm_inline.h
> > > @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > >       return true;
> > >  }
> > >
> > > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)

Nitpick: folio_migrate_refs(struct folio *new, struct folio *old)

> > > +{
> > > +     unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> > > +
> > > +     set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> > > +}
> > >  #else /* !CONFIG_LRU_GEN */
> > >
> > >  static inline bool lru_gen_enabled(void)
> > > @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > >       return false;
> > >  }
> > >
> > > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)

Ditto.

> > > +{}

A line break between "{" and "}".

> > >  #endif /* CONFIG_LRU_GEN */
> > >
> > >  static __always_inline
> > > diff --git a/mm/migrate.c b/mm/migrate.c
> > > index 923ea80ba744..60c97e235ae7 100644
> > > --- a/mm/migrate.c
> > > +++ b/mm/migrate.c
> > > @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
> > >       if (folio_test_idle(folio))
> > >               folio_set_idle(newfolio);
> > >
> > > +     folio_migrate_refs(newfolio, folio);
> > >       /*
> > >        * Copy NUMA information to the new page, to prevent over-eager
> > >        * future migrations of this same page.
> > > --
> > > 2.25.1
diff mbox series

Patch

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index f4fe593c1400..ecf07a2a8201 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -291,6 +291,12 @@  static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
 	return true;
 }
 
+static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
+{
+	unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
+
+	set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
+}
 #else /* !CONFIG_LRU_GEN */
 
 static inline bool lru_gen_enabled(void)
@@ -313,6 +319,8 @@  static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
 	return false;
 }
 
+static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
+{}
 #endif /* CONFIG_LRU_GEN */
 
 static __always_inline
diff --git a/mm/migrate.c b/mm/migrate.c
index 923ea80ba744..60c97e235ae7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -618,6 +618,7 @@  void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 	if (folio_test_idle(folio))
 		folio_set_idle(newfolio);
 
+	folio_migrate_refs(newfolio, folio);
 	/*
 	 * Copy NUMA information to the new page, to prevent over-eager
 	 * future migrations of this same page.