[3/3] mm: swap: fix update_page_reclaim_stat for huge pages

Message ID 20200508212215.181307-3-shakeelb@google.com (mailing list archive)
State New, archived
Series [1/3] mm: swap: fix vmstats for huge pages

Commit Message

Shakeel Butt May 8, 2020, 9:22 p.m. UTC
Currently, update_page_reclaim_stat() updates lruvec->reclaim_stat just
once per page, irrespective of whether the page is huge or not. Fix that
by passing hpage_nr_pages(page) to it.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
 mm/swap.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
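
To make the arithmetic concrete, here is a minimal standalone C model of
the change (illustrative only, not kernel code; HPAGE_PMD_NR is assumed
to be 512, as for 2MB THPs on x86-64, and the struct mirrors the kernel's
zone_reclaim_stat):

#include <stdio.h>

#define HPAGE_PMD_NR 512	/* base pages per 2MB THP on x86-64 */

struct zone_reclaim_stat {
	unsigned long recent_rotated[2];	/* anon == 0, file == 1 */
	unsigned long recent_scanned[2];
};

/* Before the fix: one page's worth of stats, however big the page. */
static void update_stat_old(struct zone_reclaim_stat *s, int file,
			    int rotated)
{
	s->recent_scanned[file]++;
	if (rotated)
		s->recent_rotated[file]++;
}

/* After the fix: the caller passes hpage_nr_pages(page). */
static void update_stat_new(struct zone_reclaim_stat *s, int file,
			    int rotated, int nr_pages)
{
	s->recent_scanned[file] += nr_pages;
	if (rotated)
		s->recent_rotated[file] += nr_pages;
}

int main(void)
{
	struct zone_reclaim_stat old = { 0 }, fixed = { 0 };

	/* One activated THP: the old code undercounts by a factor of 512. */
	update_stat_old(&old, 1, 1);
	update_stat_new(&fixed, 1, 1, HPAGE_PMD_NR);

	printf("old: scanned=%lu rotated=%lu\n",
	       old.recent_scanned[1], old.recent_rotated[1]);     /* 1 1 */
	printf("new: scanned=%lu rotated=%lu\n",
	       fixed.recent_scanned[1], fixed.recent_rotated[1]); /* 512 512 */
	return 0;
}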

Comments

Johannes Weiner May 8, 2020, 9:51 p.m. UTC | #1
On Fri, May 08, 2020 at 02:22:15PM -0700, Shakeel Butt wrote:
> Currently, update_page_reclaim_stat() updates lruvec->reclaim_stat just
> once per page, irrespective of whether the page is huge or not. Fix that
> by passing hpage_nr_pages(page) to it.
> 
> Signed-off-by: Shakeel Butt <shakeelb@google.com>

https://lore.kernel.org/patchwork/patch/685703/

Laughs, then cries.

> @@ -928,7 +928,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
>  	}
>  
>  	if (!PageUnevictable(page))
> -		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
> +		update_page_reclaim_stat(lruvec, file, PageActive(page_tail), 1);

The change to __pagevec_lru_add_fn() below already accounts for the
tail pages when they are added to the LRU. This would count them twice.
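
A rough sketch of the double count being pointed out, assuming the
compound page is accounted with nr_pages when it first enters the LRU
(call flow only, not real code):

/*
 * __pagevec_lru_add_fn(head)                     THP first hits the LRU
 *     update_page_reclaim_stat(..., nr_pages);   all 512 pages counted
 *
 * split_huge_page()
 *   lru_add_page_tail(head, tail, ...)           once per tail page
 *     update_page_reclaim_stat(..., 1);          tails counted again
 */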

>  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>  
> @@ -973,7 +973,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
>  	if (page_evictable(page)) {
>  		lru = page_lru(page);
>  		update_page_reclaim_stat(lruvec, page_is_file_lru(page),
> -					 PageActive(page));
> +					 PageActive(page), nr_pages);
>  		if (was_unevictable)
>  			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
>  	} else {
Shakeel Butt May 9, 2020, 2:06 p.m. UTC | #2
On Fri, May 8, 2020 at 2:51 PM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Fri, May 08, 2020 at 02:22:15PM -0700, Shakeel Butt wrote:
> > Currently, update_page_reclaim_stat() updates lruvec->reclaim_stat just
> > once per page, irrespective of whether the page is huge or not. Fix that
> > by passing hpage_nr_pages(page) to it.
> >
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
>
> https://lore.kernel.org/patchwork/patch/685703/
>
> Laughs, then cries.
>

What happened to that patch? Did it fall through the cracks?

> > @@ -928,7 +928,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
> >       }
> >
> >       if (!PageUnevictable(page))
> > -             update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
> > +             update_page_reclaim_stat(lruvec, file, PageActive(page_tail), 1);
>
> The change to __pagevec_lru_add_fn() below already accounts for the
> tail pages when they are added to the LRU. This would count them twice.
>

Yes, you are right. I will just re-send your patch after a rebase.

> >  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> >
> > @@ -973,7 +973,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
> >       if (page_evictable(page)) {
> >               lru = page_lru(page);
> >               update_page_reclaim_stat(lruvec, page_is_file_lru(page),
> > -                                      PageActive(page));
> > +                                      PageActive(page), nr_pages);
> >               if (was_unevictable)
> >                       __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
> >       } else {

Patch

diff --git a/mm/swap.c b/mm/swap.c
index 4eb179ee0b72..dc7297cb76a0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -262,14 +262,14 @@  void rotate_reclaimable_page(struct page *page)
 	}
 }
 
-static void update_page_reclaim_stat(struct lruvec *lruvec,
-				     int file, int rotated)
+static void update_page_reclaim_stat(struct lruvec *lruvec, int file,
+				     int rotated, int nr_pages)
 {
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
-	reclaim_stat->recent_scanned[file]++;
+	reclaim_stat->recent_scanned[file] += nr_pages;
 	if (rotated)
-		reclaim_stat->recent_rotated[file]++;
+		reclaim_stat->recent_rotated[file] += nr_pages;
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -288,7 +288,7 @@  static void __activate_page(struct page *page, struct lruvec *lruvec,
 
 		__count_vm_events(PGACTIVATE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
-		update_page_reclaim_stat(lruvec, file, 1);
+		update_page_reclaim_stat(lruvec, file, 1, nr_pages);
 	}
 }
 
@@ -546,7 +546,7 @@  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		__count_vm_events(PGDEACTIVATE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
 	}
-	update_page_reclaim_stat(lruvec, file, 0);
+	update_page_reclaim_stat(lruvec, file, 0, nr_pages);
 }
 
 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -564,7 +564,7 @@  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 
 		__count_vm_events(PGDEACTIVATE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
-		update_page_reclaim_stat(lruvec, file, 0);
+		update_page_reclaim_stat(lruvec, file, 0, nr_pages);
 	}
 }
 
@@ -590,7 +590,7 @@  static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 
 		__count_vm_events(PGLAZYFREE, nr_pages);
 		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
-		update_page_reclaim_stat(lruvec, 1, 0);
+		update_page_reclaim_stat(lruvec, 1, 0, nr_pages);
 	}
 }
 
@@ -928,7 +928,7 @@  void lru_add_page_tail(struct page *page, struct page *page_tail,
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail), 1);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -973,7 +973,7 @@  static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	if (page_evictable(page)) {
 		lru = page_lru(page);
 		update_page_reclaim_stat(lruvec, page_is_file_lru(page),
-					 PageActive(page));
+					 PageActive(page), nr_pages);
 		if (was_unevictable)
 			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	} else {
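
The callers updated above all follow the same pattern: derive the page
count from the (possibly compound) page and pass it through. A minimal
sketch of that pattern, with a hypothetical wrapper (hpage_nr_pages()
is the real kernel helper; the wrapper itself is illustrative):

/* Illustrative caller pattern, not part of the patch. */
static void account_reclaim_stat(struct page *page, struct lruvec *lruvec,
				 int file, int rotated)
{
	int nr_pages = hpage_nr_pages(page);	/* 1, or HPAGE_PMD_NR for a THP */

	update_page_reclaim_stat(lruvec, file, rotated, nr_pages);
}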