
[v3,03/25] mm/vmstat: Add folio stat wrappers

Message ID: 20210128070404.1922318-4-willy@infradead.org (mailing list archive)
State: New, archived
Series: Page folios

Commit Message

Matthew Wilcox Jan. 28, 2021, 7:03 a.m. UTC
Allow page counters to be more readily modified by callers which have
a folio.  Name these wrappers with 'stat' instead of 'state' as requested
by Linus here:
https://lore.kernel.org/linux-mm/CAHk-=wj847SudR-kt+46fT3+xFFgiwpgThvm7DJWGdi4cVrbnQ@mail.gmail.com/

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/vmstat.h | 60 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
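
For illustration, a typical caller conversion with these wrappers looks
like this (a hypothetical before/after sketch; 'page' and 'folio' stand
for a page and the folio that contains it):

	/* Before: the caller has a struct page */
	__inc_lruvec_page_state(page, NR_FILE_DIRTY);
	__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);

	/* After: the caller has a struct folio */
	__inc_lruvec_folio_stat(folio, NR_FILE_DIRTY);
	__inc_zone_folio_stat(folio, NR_ZONE_WRITE_PENDING);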

Comments

Zi Yan March 1, 2021, 9:17 p.m. UTC | #1
On 28 Jan 2021, at 2:03, Matthew Wilcox (Oracle) wrote:

> Allow page counters to be more readily modified by callers which have
> a folio.  Name these wrappers with 'stat' instead of 'state' as requested
> by Linus here:
> https://lore.kernel.org/linux-mm/CAHk-=wj847SudR-kt+46fT3+xFFgiwpgThvm7DJWGdi4cVrbnQ@mail.gmail.com/
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/vmstat.h | 60 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 60 insertions(+)
>
> diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
> index 773135fc6e19..3c3373c2c3c2 100644
> --- a/include/linux/vmstat.h
> +++ b/include/linux/vmstat.h
> @@ -396,6 +396,54 @@ static inline void drain_zonestat(struct zone *zone,
>  			struct per_cpu_pageset *pset) { }
>  #endif		/* CONFIG_SMP */
>
> +static inline
> +void __inc_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
> +{
> +	__inc_zone_page_state(&folio->page, item);

Shouldn’t we adjust the stats by folio_nr_pages(folio) here, and in all
the changes below? Otherwise one folio is always counted as a single page.

> +}
> [snip: the remainder of the quoted patch appears in full in the Patch section below]


—
Best Regards,
Yan Zi
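
For concreteness, the adjustment Zi Yan is suggesting would look roughly
like this (a sketch only, not part of the posted patch; it relies on the
folio_nr_pages() helper introduced earlier in this series):

	static inline
	void __inc_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
	{
		__mod_zone_page_state(page_zone(&folio->page), item,
				      folio_nr_pages(folio));
	}

Each wrapper would pass folio_nr_pages(folio) as the delta instead of the
implicit 1, so a multi-page folio is counted as all of its pages.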
Matthew Wilcox March 1, 2021, 10:15 p.m. UTC | #2
On Mon, Mar 01, 2021 at 04:17:39PM -0500, Zi Yan wrote:
> On 28 Jan 2021, at 2:03, Matthew Wilcox (Oracle) wrote:
> > Allow page counters to be more readily modified by callers which have
> > a folio.  Name these wrappers with 'stat' instead of 'state' as requested
>
> Shouldn’t we adjust the stats by folio_nr_pages(folio) here, and in all
> the changes below? Otherwise one folio is always counted as a single page.

That's a good point.  Looking through the changes in my current folio
tree (which doesn't get as far as the thp tree did; i.e. it doesn't yet
allocate multi-page folios, so hasn't been tested with anything larger
than a single page), the callers are ...

@@ -2698,3 +2698,3 @@ int clear_page_dirty_for_io(struct page *page)
-               if (TestClearPageDirty(page)) {
-                       dec_lruvec_page_state(page, NR_FILE_DIRTY);
-                       dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+               if (TestClearFolioDirty(folio)) {
+                       dec_lruvec_folio_stat(folio, NR_FILE_DIRTY);
+                       dec_zone_folio_stat(folio, NR_ZONE_WRITE_PENDING);
@@ -2432,3 +2433,3 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
-               __inc_lruvec_page_state(page, NR_FILE_DIRTY);
-               __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-               __inc_node_page_state(page, NR_DIRTIED);
+               __inc_lruvec_folio_stat(folio, NR_FILE_DIRTY);
+               __inc_zone_folio_stat(folio, NR_ZONE_WRITE_PENDING);
+               __inc_node_folio_stat(folio, NR_DIRTIED);
@@ -891 +890 @@ noinline int __add_to_page_cache_locked(struct page *page,
-                       __inc_lruvec_page_state(page, NR_FILE_PAGES);
+                       __inc_lruvec_folio_stat(folio, NR_FILE_PAGES);
@@ -2759,2 +2759,2 @@ int test_clear_page_writeback(struct page *page)
-               dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-               inc_node_page_state(page, NR_WRITTEN);
+               dec_zone_folio_stat(folio, NR_ZONE_WRITE_PENDING);
+               inc_node_folio_stat(folio, NR_WRITTEN);

I think it's clear from this that I haven't found all the places
that I need to change yet ;-)

Looking at the places I did change in the thp tree, there are changes
like this:

@@ -860,27 +864,30 @@ noinline int __add_to_page_cache_locked(struct page *page,
-               if (!huge)
-                       __inc_lruvec_page_state(page, NR_FILE_PAGES);
+               if (!huge) {
+                       __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
+                       if (nr > 1)
+                               __mod_node_page_state(page_pgdat(page),
+                                               NR_FILE_THPS, nr);
+               }

... but I never made some of the changes that the above hunk implies are
needed.  So the thp tree probably had all kinds of bad statistics that
I never noticed.

So ... at least some of the users are definitely going to want to
cache the 'nr_pages' and use it multiple times, including calling
__mod_node_folio_stat(), but others should do what you suggested.
Thanks!  I'll make that change.
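
A sketch of the mod-style wrapper alluded to above (hypothetical:
__mod_node_folio_stat() is not part of this patch; the name simply
follows the 'stat' convention used here):

	static inline
	void __mod_node_folio_stat(struct folio *folio, enum node_stat_item item,
				   long nr)
	{
		__mod_node_page_state(page_pgdat(&folio->page), item, nr);
	}

A caller that needs the folio size several times would then cache it once:

	long nr = folio_nr_pages(folio);

	__mod_lruvec_page_state(&folio->page, NR_FILE_PAGES, nr);
	if (nr > 1)
		__mod_node_folio_stat(folio, NR_FILE_THPS, nr);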

Patch

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 773135fc6e19..3c3373c2c3c2 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -396,6 +396,54 @@ static inline void drain_zonestat(struct zone *zone,
 			struct per_cpu_pageset *pset) { }
 #endif		/* CONFIG_SMP */
 
+static inline
+void __inc_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
+{
+	__inc_zone_page_state(&folio->page, item);
+}
+
+static inline
+void __dec_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
+{
+	__dec_zone_page_state(&folio->page, item);
+}
+
+static inline
+void inc_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
+{
+	inc_zone_page_state(&folio->page, item);
+}
+
+static inline
+void dec_zone_folio_stat(struct folio *folio, enum zone_stat_item item)
+{
+	dec_zone_page_state(&folio->page, item);
+}
+
+static inline
+void __inc_node_folio_stat(struct folio *folio, enum node_stat_item item)
+{
+	__inc_node_page_state(&folio->page, item);
+}
+
+static inline
+void __dec_node_folio_stat(struct folio *folio, enum node_stat_item item)
+{
+	__dec_node_page_state(&folio->page, item);
+}
+
+static inline
+void inc_node_folio_stat(struct folio *folio, enum node_stat_item item)
+{
+	inc_node_page_state(&folio->page, item);
+}
+
+static inline
+void dec_node_folio_stat(struct folio *folio, enum node_stat_item item)
+{
+	dec_node_page_state(&folio->page, item);
+}
+
 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
 					     int migratetype)
 {
@@ -530,6 +578,18 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_folio_stat(struct folio *folio,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(&folio->page, idx, 1);
+}
+
+static inline void __dec_lruvec_folio_stat(struct folio *folio,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(&folio->page, idx, -1);
+}
+
 static inline void inc_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx)
 {