
mm: memcg: add THP swap out info for anonymous reclaim

Message ID 20230909155242.22767-1-vernhao@tencent.com (mailing list archive)
State New
Series mm: memcg: add THP swap out info for anonymous reclaim

Commit Message

Vern Hao Sept. 9, 2023, 3:52 p.m. UTC
At present we support a per-memcg reclaim strategy, but we do not know
how many transparent huge pages are being reclaimed. As we know,
transparent huge pages need to be split before they can be reclaimed,
and splitting them can create a performance bottleneck. For example,
when two memcgs (A and B) are reclaiming anonymous pages at the same
time, and memcg 'A' is reclaiming a large number of transparent huge
pages, we can more easily determine that the performance bottleneck is
caused by memcg 'A'. Therefore, to make such problems easier to
analyze, add per-memcg THP swap-out info.
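
For example, with this change the new counters show up in each
cgroup's memory.stat (hypothetical values, assuming a cgroup v2
hierarchy mounted at /sys/fs/cgroup with the two cgroups named A and B
as above):

  # grep thp_swpout /sys/fs/cgroup/A/memory.stat
  thp_swpout 1024
  thp_swpout_fallback 32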

Signed-off-by: Xin Hao <vernhao@tencent.com>
---
 mm/memcontrol.c | 6 ++++++
 mm/page_io.c    | 4 +++-
 mm/vmscan.c     | 2 ++
 3 files changed, 11 insertions(+), 1 deletion(-)

Comments

Johannes Weiner Sept. 11, 2023, 4:08 p.m. UTC | #1
On Sat, Sep 09, 2023 at 11:52:41PM +0800, Xin Hao wrote:
> At present we support a per-memcg reclaim strategy, but we do not know
> how many transparent huge pages are being reclaimed. As we know,
> transparent huge pages need to be split before they can be reclaimed,
> and splitting them can create a performance bottleneck. For example,
> when two memcgs (A and B) are reclaiming anonymous pages at the same
> time, and memcg 'A' is reclaiming a large number of transparent huge
> pages, we can more easily determine that the performance bottleneck is
> caused by memcg 'A'. Therefore, to make such problems easier to
> analyze, add per-memcg THP swap-out info.
> 
> Signed-off-by: Xin Hao <vernhao@tencent.com>

That sounds reasonable. A few comments below:

> @@ -4131,6 +4133,10 @@ static const unsigned int memcg1_events[] = {
>  	PGPGOUT,
>  	PGFAULT,
>  	PGMAJFAULT,
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	THP_SWPOUT,
> +	THP_SWPOUT_FALLBACK,
> +#endif
>  };

Cgroup1 is maintenance-only, please drop this hunk.

>  static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
> diff --git a/mm/page_io.c b/mm/page_io.c
> index fe4c21af23f2..008ada2e024a 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
>  static inline void count_swpout_vm_event(struct folio *folio)
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	if (unlikely(folio_test_pmd_mappable(folio)))
> +	if (unlikely(folio_test_pmd_mappable(folio))) {
> +		count_memcg_events(folio_memcg(folio), THP_SWPOUT, 1);

count_memcg_folio_events()

>  		count_vm_event(THP_SWPOUT);
> +	}
>  #endif
>  	count_vm_events(PSWPOUT, folio_nr_pages(folio));
>  }
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index ea57a43ebd6b..29a82b72345a 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1928,6 +1928,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>  								folio_list))
>  						goto activate_locked;
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +					count_memcg_events(folio_memcg(folio),
> +							   THP_SWPOUT_FALLBACK, 1);

count_memcg_folio_events()
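
For reference, count_memcg_folio_events() in include/linux/memcontrol.h
looks roughly like this (a sketch of the existing helper, not part of
this patch); unlike the open-coded count_memcg_events(folio_memcg(folio), ...)
calls above, it also skips the update when the folio has no memcg:

	static inline void count_memcg_folio_events(struct folio *folio,
			enum vm_event_item idx, unsigned long nr)
	{
		struct mem_cgroup *memcg = folio_memcg(folio);

		if (memcg)
			count_memcg_events(memcg, idx, nr);
	}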
Vern Hao Sept. 12, 2023, 1:49 a.m. UTC | #2
On 2023/9/12 00:08, Johannes Weiner wrote:
> On Sat, Sep 09, 2023 at 11:52:41PM +0800, Xin Hao wrote:
>> At present we support a per-memcg reclaim strategy, but we do not know
>> how many transparent huge pages are being reclaimed. As we know,
>> transparent huge pages need to be split before they can be reclaimed,
>> and splitting them can create a performance bottleneck. For example,
>> when two memcgs (A and B) are reclaiming anonymous pages at the same
>> time, and memcg 'A' is reclaiming a large number of transparent huge
>> pages, we can more easily determine that the performance bottleneck is
>> caused by memcg 'A'. Therefore, to make such problems easier to
>> analyze, add per-memcg THP swap-out info.
>>
>> Signed-off-by: Xin Hao <vernhao@tencent.com>
> That sounds reasonable. A few comments below:
>
>> @@ -4131,6 +4133,10 @@ static const unsigned int memcg1_events[] = {
>>   	PGPGOUT,
>>   	PGFAULT,
>>   	PGMAJFAULT,
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +	THP_SWPOUT,
>> +	THP_SWPOUT_FALLBACK,
>> +#endif
>>   };
> Cgroup1 is maintenance-only, please drop this hunk.
Will remove it in the next version, thanks.
>
>>   static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>> diff --git a/mm/page_io.c b/mm/page_io.c
>> index fe4c21af23f2..008ada2e024a 100644
>> --- a/mm/page_io.c
>> +++ b/mm/page_io.c
>> @@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
>>   static inline void count_swpout_vm_event(struct folio *folio)
>>   {
>>   #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> -	if (unlikely(folio_test_pmd_mappable(folio)))
>> +	if (unlikely(folio_test_pmd_mappable(folio))) {
>> +		count_memcg_events(folio_memcg(folio), THP_SWPOUT, 1);
> count_memcg_folio_events()
Done.
>
>>   		count_vm_event(THP_SWPOUT);
>> +	}
>>   #endif
>>   	count_vm_events(PSWPOUT, folio_nr_pages(folio));
>>   }
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index ea57a43ebd6b..29a82b72345a 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -1928,6 +1928,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>>   								folio_list))
>>   						goto activate_locked;
>>   #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +					count_memcg_events(folio_memcg(folio),
>> +							   THP_SWPOUT_FALLBACK, 1);
> count_memcg_folio_events()

Done.

Thanks.
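
With both call sites switched over, the page_io.c side of the v2 would
presumably look like this (a sketch of the expected follow-up, not the
posted v2):

	static inline void count_swpout_vm_event(struct folio *folio)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (unlikely(folio_test_pmd_mappable(folio))) {
			count_memcg_folio_events(folio, THP_SWPOUT, 1);
			count_vm_event(THP_SWPOUT);
		}
	#endif
		count_vm_events(PSWPOUT, folio_nr_pages(folio));
	}

and the fallback path in mm/vmscan.c would call
count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1) in the same way.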

Patch

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ecc07b47e813..a644f601e2ca 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -752,6 +752,8 @@ static const unsigned int memcg_vm_event_stat[] = {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	THP_FAULT_ALLOC,
 	THP_COLLAPSE_ALLOC,
+	THP_SWPOUT,
+	THP_SWPOUT_FALLBACK,
 #endif
 };
 
@@ -4131,6 +4133,10 @@ static const unsigned int memcg1_events[] = {
 	PGPGOUT,
 	PGFAULT,
 	PGMAJFAULT,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	THP_SWPOUT,
+	THP_SWPOUT_FALLBACK,
+#endif
 };
 
 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
diff --git a/mm/page_io.c b/mm/page_io.c
index fe4c21af23f2..008ada2e024a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 static inline void count_swpout_vm_event(struct folio *folio)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (unlikely(folio_test_pmd_mappable(folio)))
+	if (unlikely(folio_test_pmd_mappable(folio))) {
+		count_memcg_events(folio_memcg(folio), THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
+	}
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ea57a43ebd6b..29a82b72345a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1928,6 +1928,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 								folio_list))
 						goto activate_locked;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+					count_memcg_events(folio_memcg(folio),
+							   THP_SWPOUT_FALLBACK, 1);
 					count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
 					if (!add_to_swap(folio))
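
To put the new counters to use, a monitoring tool can compare
thp_swpout against thp_swpout_fallback per cgroup. A minimal userspace
sketch (hypothetical helper and cgroup names, assuming a cgroup v2
mount at /sys/fs/cgroup and the vmstat-style event names):

	#include <stdio.h>
	#include <string.h>

	/* Read one event counter from a cgroup's memory.stat; returns -1
	 * on error. Hypothetical helper for illustration, not part of
	 * the kernel patch. */
	static long long read_memcg_event(const char *cgroup, const char *event)
	{
		char path[256], key[64];
		long long val;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/memory.stat", cgroup);
		f = fopen(path, "r");
		if (!f)
			return -1;
		while (fscanf(f, "%63s %lld", key, &val) == 2) {
			if (!strcmp(key, event)) {
				fclose(f);
				return val;
			}
		}
		fclose(f);
		return -1;
	}

	int main(void)
	{
		long long swpout = read_memcg_event("A", "thp_swpout");
		long long fallback = read_memcg_event("A", "thp_swpout_fallback");

		if (swpout < 0 || fallback < 0)
			return 1;
		/* A high fallback share means memcg A is splitting THPs
		 * instead of swapping them out whole -- the bottleneck
		 * described in the commit message. */
		printf("A: thp_swpout=%lld thp_swpout_fallback=%lld (%.1f%% fallback)\n",
		       swpout, fallback,
		       swpout + fallback ? 100.0 * fallback / (swpout + fallback) : 0.0);
		return 0;
	}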