
[v6,2/4] mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters

Message ID: 20240412114858.407208-3-21cnbao@gmail.com (mailing list archive)
State: New
Series: mm: add per-order mTHP alloc and swpout counters

Commit Message

Barry Song April 12, 2024, 11:48 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

This helps to display the fragmentation situation of the swapfile: it
shows what proportion of large folios we managed to swap out without
splitting them.  So far, we only support non-split swapout for anon
memory, with the possibility of expanding to shmem in the future, so we
add the "anon" prefix to the counter names.

Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Yu Zhao <yuzhao@google.com>
---
 include/linux/huge_mm.h | 2 ++
 mm/huge_memory.c        | 4 ++++
 mm/page_io.c            | 1 +
 mm/vmscan.c             | 3 +++
 4 files changed, 10 insertions(+)
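
For reference, these counters surface under the per-size mTHP stats
directory introduced earlier in this series. Below is a minimal
userspace sketch for reading them and deriving the fallback proportion
the commit message talks about. The sysfs path follows the
/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/ layout
from patch 1 of the series; the helper and the hard-coded 64kB size are
illustrative assumptions, not part of the patch:

/* Sketch: read the per-order anon swpout counters from sysfs. */
#include <stdio.h>

static long read_stat(const char *size_kb, const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/hugepages-%skB/stats/%s",
		 size_kb, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long out = read_stat("64", "anon_swpout");
	long fallback = read_stat("64", "anon_swpout_fallback");

	if (out < 0 || fallback < 0)
		return 1;
	if (out + fallback)
		printf("64kB mTHP: %ld swapped out whole, %ld split (%.1f%% fallback)\n",
		       out, fallback, 100.0 * fallback / (out + fallback));
	return 0;
}

A high fallback proportion suggests the swap device is too fragmented
to supply contiguous slots for large folios.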

Comments

David Hildenbrand April 16, 2024, 8:14 a.m. UTC | #1
On 12.04.24 13:48, Barry Song wrote:
> [...]
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bca2d9981c95..49bd94423961 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1231,6 +1231,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>   						goto activate_locked;
>   				}
>   				if (!add_to_swap(folio)) {
> +					int __maybe_unused order = folio_order(folio);
> +
>   					if (!folio_test_large(folio))
>   						goto activate_locked_split;
>   					/* Fallback to swap normal pages */
> @@ -1242,6 +1244,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>   							THP_SWPOUT_FALLBACK, 1);
>   						count_vm_event(THP_SWPOUT_FALLBACK);
>   					}
> +					count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);

Why the temporary variable for order?

count_mthp_stat(folio_order(folio),
                MTHP_STAT_ANON_SWPOUT_FALLBACK);

... but now I do wonder if we want to pass the folio to count_mthp_stat() ?

Anyhow

Acked-by: David Hildenbrand <david@redhat.com>
Barry Song April 16, 2024, 8:16 a.m. UTC | #2
On Tue, Apr 16, 2024 at 8:14 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 12.04.24 13:48, Barry Song wrote:
> > [...]
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index bca2d9981c95..49bd94423961 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -1231,6 +1231,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> >                                               goto activate_locked;
> >                               }
> >                               if (!add_to_swap(folio)) {
> > +                                     int __maybe_unused order = folio_order(folio);
> > +
> >                                       if (!folio_test_large(folio))
> >                                               goto activate_locked_split;
> >                                       /* Fallback to swap normal pages */
> > @@ -1242,6 +1244,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> >                                                       THP_SWPOUT_FALLBACK, 1);
> >                                               count_vm_event(THP_SWPOUT_FALLBACK);
> >                                       }
> > +                                     count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
>
> Why the temporary variable for order?
>
> count_mthp_stat(folio_order(folio),
>                  MTHP_STAT_ANON_SWPOUT_FALLBACK);
>
> ... but now I do wonder if we want to pass the folio to count_mthp_stat() ?

Because we have called split_folio_to_list() before counting,
folio_order(folio) is no longer the original order at that point. That
is also why Ryan is using if (nr_pages >= HPAGE_PMD_NR) rather than
pmd_mappable.
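
In other words, the split zeroes the order before the stat is bumped.
A toy userspace model of the hazard (struct folio here is a stand-in
for the kernel type, not real kernel code):

/* Toy model: why the order must be snapshotted before the split. */
#include <stdio.h>

struct folio { int order; };	/* stand-in for the kernel's struct */

static void split_folio(struct folio *f)
{
	f->order = 0;	/* a split leaves only order-0 pages behind */
}

int main(void)
{
	struct folio f = { .order = 4 };	/* e.g. a 64kB mTHP on 4kB pages */
	int order = f.order;			/* snapshot, as the patch does */

	split_folio(&f);

	/* Counting with f.order here would credit order 0, not order 4 */
	printf("snapshot=%d, post-split=%d\n", order, f.order);
	return 0;
}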


>
> Anyhow
>
> Acked-by: David Hildenbrand <david@redhat.com>

thanks!

>
> --
> Cheers,
>
> David / dhildenb
>
Barry
David Hildenbrand April 16, 2024, 8:17 a.m. UTC | #3
On 16.04.24 10:14, David Hildenbrand wrote:
> On 12.04.24 13:48, Barry Song wrote:
>> [...]
>>
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index bca2d9981c95..49bd94423961 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -1231,6 +1231,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>>    						goto activate_locked;
>>    				}
>>    				if (!add_to_swap(folio)) {
>> +					int __maybe_unused order = folio_order(folio);
>> +
>>    					if (!folio_test_large(folio))
>>    						goto activate_locked_split;
>>    					/* Fallback to swap normal pages */
>> @@ -1242,6 +1244,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>>    							THP_SWPOUT_FALLBACK, 1);
>>    						count_vm_event(THP_SWPOUT_FALLBACK);
>>    					}
>> +					count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
> 
> Why the temporary variable for order?
> 
> count_mthp_stat(folio_order(folio),
>                   MTHP_STAT_ANON_SWPOUT_FALLBACK);
> 
> ... but now I do wonder if we want to pass the folio to count_mthp_stat() ?

... and now realizing that that doesn't make sense if we fail to
allocate the folio in the first place. So all good.

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index d4fdb2641070..7cd07b83a3d0 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -268,6 +268,8 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_ALLOC,
 	MTHP_STAT_ANON_FAULT_FALLBACK,
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+	MTHP_STAT_ANON_SWPOUT,
+	MTHP_STAT_ANON_SWPOUT_FALLBACK,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index dfc38cc83a04..58f2c4745d80 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -555,11 +555,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
 	&anon_fault_fallback_attr.attr,
 	&anon_fault_fallback_charge_attr.attr,
+	&anon_swpout_attr.attr,
+	&anon_swpout_fallback_attr.attr,
 	NULL,
 };
 
diff --git a/mm/page_io.c b/mm/page_io.c
index a9a7c236aecc..46c603dddf04 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -217,6 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
 		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
 	}
+	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bca2d9981c95..49bd94423961 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1231,6 +1231,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 						goto activate_locked;
 				}
 				if (!add_to_swap(folio)) {
+					int __maybe_unused order = folio_order(folio);
+
 					if (!folio_test_large(folio))
 						goto activate_locked_split;
 					/* Fallback to swap normal pages */
@@ -1242,6 +1244,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 							THP_SWPOUT_FALLBACK, 1);
 						count_vm_event(THP_SWPOUT_FALLBACK);
 					}
+					count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 #endif
 					if (!add_to_swap(folio))
 						goto activate_locked_split;
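
A closing note on why the page_io.c hook can be unconditional: every
swapped-out folio, including order-0 ones, passes through
count_swpout_vm_event(), so count_mthp_stat() must tolerate orders
outside the mTHP range. The sketch below models that shape in
userspace; the range check mirrors how count_mthp_stat() from patch 1
of this series is understood to behave (the kernel version is per-CPU,
and the exact body should be verified against that patch):

/* Userspace model of per-order mTHP stat buckets (assumption). */
#include <stdio.h>

#define PMD_ORDER	9	/* 2MB PMD on 4kB pages; arch-dependent */

enum { ANON_SWPOUT, ANON_SWPOUT_FALLBACK, STAT_COUNT };

static unsigned long mthp_stats[PMD_ORDER + 1][STAT_COUNT];

static void count_mthp_stat(int order, int item)
{
	/* Orders outside the mTHP range are silently ignored, which is
	 * why callers need not check folio_order() first. */
	if (order <= 0 || order > PMD_ORDER)
		return;
	mthp_stats[order][item]++;
}

int main(void)
{
	count_mthp_stat(0, ANON_SWPOUT);	/* order-0 folio: dropped */
	count_mthp_stat(4, ANON_SWPOUT);	/* 64kB mTHP: counted */
	printf("order-4 anon_swpout = %lu\n", mthp_stats[4][ANON_SWPOUT]);
	return 0;
}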