diff mbox series

[v4,10/11] mm: lru: add VM_BUG_ON_FOLIO to lru maintenance function

Message ID 20220524060551.80037-11-songmuchun@bytedance.com (mailing list archive)
State New
Headers show
Series Use obj_cgroup APIs to charge the LRU pages | expand

Commit Message

Muchun Song May 24, 2022, 6:05 a.m. UTC
We need to make sure that the page is deleted from or added to the
correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid
users.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/mm_inline.h | 6 ++++++
 mm/vmscan.c               | 1 -
 2 files changed, 6 insertions(+), 1 deletion(-)

Comments

Johannes Weiner May 24, 2022, 7:44 p.m. UTC | #1
On Tue, May 24, 2022 at 02:05:50PM +0800, Muchun Song wrote:
> We need to make sure that the page is deleted from or added to the
> correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid
> users.
> 
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>

Makes sense, but please use VM_WARN_ON_ONCE_FOLIO() so the machine can
continue limping along for extracting debug information.
Roman Gushchin May 25, 2022, 2:40 a.m. UTC | #2
On Tue, May 24, 2022 at 02:05:50PM +0800, Muchun Song wrote:
> We need to make sure that the page is deleted from or added to the
> correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid
> users.
> 
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> ---
>  include/linux/mm_inline.h | 6 ++++++
>  mm/vmscan.c               | 1 -
>  2 files changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index ac32125745ab..30d2393da613 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -97,6 +97,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
>  {
>  	enum lru_list lru = folio_lru_list(folio);
>  
> +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> +
>  	update_lru_size(lruvec, lru, folio_zonenum(folio),
>  			folio_nr_pages(folio));
>  	if (lru != LRU_UNEVICTABLE)
> @@ -114,6 +116,8 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
>  {
>  	enum lru_list lru = folio_lru_list(folio);
>  
> +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> +
>  	update_lru_size(lruvec, lru, folio_zonenum(folio),
>  			folio_nr_pages(folio));
>  	/* This is not expected to be used on LRU_UNEVICTABLE */
> @@ -131,6 +135,8 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
>  {
>  	enum lru_list lru = folio_lru_list(folio);
>  
> +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> +
>  	if (lru != LRU_UNEVICTABLE)
>  		list_del(&folio->lru);
>  	update_lru_size(lruvec, lru, folio_zonenum(folio),
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 761d5e0dd78d..6c9e2eafc8f9 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2281,7 +2281,6 @@ static unsigned int move_pages_to_lru(struct list_head *list)
>  			continue;
>  		}
>  
> -		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);

The commit log describes well why we need to add new BUG_ON's. Please, add
something on why this is removed.


Thanks!
Muchun Song May 25, 2022, 11:58 a.m. UTC | #3
On Tue, May 24, 2022 at 07:40:05PM -0700, Roman Gushchin wrote:
> On Tue, May 24, 2022 at 02:05:50PM +0800, Muchun Song wrote:
> > We need to make sure that the page is deleted from or added to the
> > correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid
> > users.
> > 
> > Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> > ---
> >  include/linux/mm_inline.h | 6 ++++++
> >  mm/vmscan.c               | 1 -
> >  2 files changed, 6 insertions(+), 1 deletion(-)
> > 
> > diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> > index ac32125745ab..30d2393da613 100644
> > --- a/include/linux/mm_inline.h
> > +++ b/include/linux/mm_inline.h
> > @@ -97,6 +97,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
> >  {
> >  	enum lru_list lru = folio_lru_list(folio);
> >  
> > +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> > +
> >  	update_lru_size(lruvec, lru, folio_zonenum(folio),
> >  			folio_nr_pages(folio));
> >  	if (lru != LRU_UNEVICTABLE)
> > @@ -114,6 +116,8 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
> >  {
> >  	enum lru_list lru = folio_lru_list(folio);
> >  
> > +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> > +
> >  	update_lru_size(lruvec, lru, folio_zonenum(folio),
> >  			folio_nr_pages(folio));
> >  	/* This is not expected to be used on LRU_UNEVICTABLE */
> > @@ -131,6 +135,8 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
> >  {
> >  	enum lru_list lru = folio_lru_list(folio);
> >  
> > +	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
> > +
> >  	if (lru != LRU_UNEVICTABLE)
> >  		list_del(&folio->lru);
> >  	update_lru_size(lruvec, lru, folio_zonenum(folio),
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 761d5e0dd78d..6c9e2eafc8f9 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -2281,7 +2281,6 @@ static unsigned int move_pages_to_lru(struct list_head *list)
> >  			continue;
> >  		}
> >  
> > -		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
> 
> The commit log describes well why we need to add new BUG_ON's. Please, add
> something on why this is removed.
>

OK. Will do in v5.

Thanks.
Muchun Song May 25, 2022, 11:59 a.m. UTC | #4
On Tue, May 24, 2022 at 03:44:02PM -0400, Johannes Weiner wrote:
> On Tue, May 24, 2022 at 02:05:50PM +0800, Muchun Song wrote:
> > We need to make sure that the page is deleted from or added to the
> > correct lruvec list. So add a VM_BUG_ON_FOLIO() to catch invalid
> > users.
> > 
> > Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> 
> Makes sense, but please use VM_WARN_ON_ONCE_FOLIO() so the machine can
> continue limping along for extracting debug information.
>

Makes sense. Will do.

Thanks.
diff mbox series

Patch

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ac32125745ab..30d2393da613 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -97,6 +97,8 @@  void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
 {
 	enum lru_list lru = folio_lru_list(folio);
 
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
 	if (lru != LRU_UNEVICTABLE)
@@ -114,6 +116,8 @@  void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
 {
 	enum lru_list lru = folio_lru_list(folio);
 
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
 			folio_nr_pages(folio));
 	/* This is not expected to be used on LRU_UNEVICTABLE */
@@ -131,6 +135,8 @@  void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 {
 	enum lru_list lru = folio_lru_list(folio);
 
+	VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
 	if (lru != LRU_UNEVICTABLE)
 		list_del(&folio->lru);
 	update_lru_size(lruvec, lru, folio_zonenum(folio),
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 761d5e0dd78d..6c9e2eafc8f9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2281,7 +2281,6 @@  static unsigned int move_pages_to_lru(struct list_head *list)
 			continue;
 		}
 
-		VM_BUG_ON_PAGE(!folio_matches_lruvec(folio, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;