
[v2,7/7] mm: shmem: add mTHP counters for anonymous shmem

Message ID: b9babe1857917ff217ed0988d95ba3468845479c.1715571279.git.baolin.wang@linux.alibaba.com (mailing list archive)
State: New
Series: add mTHP support for anonymous shmem

Commit Message

Baolin Wang May 13, 2024, 5:08 a.m. UTC
Add mTHP counters for anonymous shmem.
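
With this change, the per-size counters should show up under
/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/ as
file_alloc, file_fallback and file_fallback_charge, next to the
existing anon counters.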

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/huge_mm.h |  3 +++
 mm/huge_memory.c        |  6 ++++++
 mm/shmem.c              | 18 +++++++++++++++---
 3 files changed, 24 insertions(+), 3 deletions(-)

Comments

Lance Yang May 14, 2024, 2:49 p.m. UTC | #1
Hi Baolin,

On Mon, May 13, 2024 at 1:08 PM Baolin Wang
<baolin.wang@linux.alibaba.com> wrote:
>
> Add mTHP counters for anonymous shmem.
>
> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> ---
>  include/linux/huge_mm.h |  3 +++
>  mm/huge_memory.c        |  6 ++++++
>  mm/shmem.c              | 18 +++++++++++++++---
>  3 files changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index b5339210268d..e162498fef82 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -281,6 +281,9 @@ enum mthp_stat_item {
>         MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
>         MTHP_STAT_ANON_SWPOUT,
>         MTHP_STAT_ANON_SWPOUT_FALLBACK,
> +       MTHP_STAT_FILE_ALLOC,
> +       MTHP_STAT_FILE_FALLBACK,
> +       MTHP_STAT_FILE_FALLBACK_CHARGE,
>         __MTHP_STAT_COUNT
>  };
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index d3080a8843f2..fcda6ae604f6 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -555,6 +555,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
>  DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
>  DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
>  DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
> +DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
> +DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
> +DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
>
>  static struct attribute *stats_attrs[] = {
>         &anon_fault_alloc_attr.attr,
> @@ -562,6 +565,9 @@ static struct attribute *stats_attrs[] = {
>         &anon_fault_fallback_charge_attr.attr,
>         &anon_swpout_attr.attr,
>         &anon_swpout_fallback_attr.attr,
> +       &file_alloc_attr.attr,
> +       &file_fallback_attr.attr,
> +       &file_fallback_charge_attr.attr,
>         NULL,
>  };
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 8b020ff09c72..fd2cb2e73a21 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1786,6 +1786,9 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>
>                         if (pages == HPAGE_PMD_NR)
>                                 count_vm_event(THP_FILE_FALLBACK);
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +                       count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK);
> +#endif

Seems like we don't need these conditional compilation directives here.

The THP_FILE_FALLBACK above would already cause a compilation error if
CONFIG_TRANSPARENT_HUGEPAGE were not defined, so we don't need to
worry about that :)

See THP_FILE_FALLBACK in include/linux/vm_event_item.h.
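
For reference, the fallback definitions there look roughly like this
(quoting from memory, so please double-check the exact list):

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; })
#define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; })
#define THP_FILE_FALLBACK_CHARGE ({ BUILD_BUG(); 0; })
#endif

so any user that is not compiled out when THP is disabled breaks the
build instead of silently counting nothing.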

>                         order = next_order(&suitable_orders, order);
>                 }
>         } else {
> @@ -1805,9 +1808,15 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>                 if (xa_find(&mapping->i_pages, &index,
>                                 index + pages - 1, XA_PRESENT)) {
>                         error = -EEXIST;
> -               } else if (pages == HPAGE_PMD_NR) {
> -                       count_vm_event(THP_FILE_FALLBACK);
> -                       count_vm_event(THP_FILE_FALLBACK_CHARGE);
> +               } else if (pages > 1) {
> +                       if (pages == HPAGE_PMD_NR) {
> +                               count_vm_event(THP_FILE_FALLBACK);
> +                               count_vm_event(THP_FILE_FALLBACK_CHARGE);
> +                       }
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK);
> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE);
> +#endif

As above.

>                 }
>                 goto unlock;
>         }
> @@ -2178,6 +2187,9 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>                 if (!IS_ERR(folio)) {
>                         if (folio_test_pmd_mappable(folio))
>                                 count_vm_event(THP_FILE_ALLOC);
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC);
> +#endif

As above.

Perhaps we need to define MTHP_STAT_FILE_ALLOC and friends in the
same way as THP_FILE_ALLOC, i.e. as '({ BUILD_BUG(); 0; })' when
CONFIG_TRANSPARENT_HUGEPAGE is not defined.

Something like:

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define MTHP_STAT_FILE_ALLOC ({ BUILD_BUG(); 0; })
...
#endif

Thanks,
Lance


>                         goto alloced;
>                 }
>                 if (PTR_ERR(folio) == -EEXIST)
> --
> 2.39.3
>
Baolin Wang May 15, 2024, 3:14 a.m. UTC | #2
On 2024/5/14 22:49, Lance Yang wrote:
> Hi Baolin,
> 
> On Mon, May 13, 2024 at 1:08 PM Baolin Wang
> <baolin.wang@linux.alibaba.com> wrote:
>>
>> Add mTHP counters for anonymous shmem.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>> ---
>>   include/linux/huge_mm.h |  3 +++
>>   mm/huge_memory.c        |  6 ++++++
>>   mm/shmem.c              | 18 +++++++++++++++---
>>   3 files changed, 24 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index b5339210268d..e162498fef82 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -281,6 +281,9 @@ enum mthp_stat_item {
>>          MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
>>          MTHP_STAT_ANON_SWPOUT,
>>          MTHP_STAT_ANON_SWPOUT_FALLBACK,
>> +       MTHP_STAT_FILE_ALLOC,
>> +       MTHP_STAT_FILE_FALLBACK,
>> +       MTHP_STAT_FILE_FALLBACK_CHARGE,
>>          __MTHP_STAT_COUNT
>>   };
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index d3080a8843f2..fcda6ae604f6 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -555,6 +555,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
>>   DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
>>   DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
>>   DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
>> +DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
>> +DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
>> +DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
>>
>>   static struct attribute *stats_attrs[] = {
>>          &anon_fault_alloc_attr.attr,
>> @@ -562,6 +565,9 @@ static struct attribute *stats_attrs[] = {
>>          &anon_fault_fallback_charge_attr.attr,
>>          &anon_swpout_attr.attr,
>>          &anon_swpout_fallback_attr.attr,
>> +       &file_alloc_attr.attr,
>> +       &file_fallback_attr.attr,
>> +       &file_fallback_charge_attr.attr,
>>          NULL,
>>   };
>>
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 8b020ff09c72..fd2cb2e73a21 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -1786,6 +1786,9 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>>
>>                          if (pages == HPAGE_PMD_NR)
>>                                  count_vm_event(THP_FILE_FALLBACK);
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +                       count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK);
>> +#endif
> 
> Seems like we don't need these conditional compilation directives here.
> 
> The THP_FILE_FALLBACK above would already cause a compilation error if
> CONFIG_TRANSPARENT_HUGEPAGE were not defined, so we don't need to
> worry about that :)
> 
> See THP_FILE_FALLBACK in include/linux/vm_event_item.h.
> 
>>                          order = next_order(&suitable_orders, order);
>>                  }
>>          } else {
>> @@ -1805,9 +1808,15 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>>                  if (xa_find(&mapping->i_pages, &index,
>>                                  index + pages - 1, XA_PRESENT)) {
>>                          error = -EEXIST;
>> -               } else if (pages == HPAGE_PMD_NR) {
>> -                       count_vm_event(THP_FILE_FALLBACK);
>> -                       count_vm_event(THP_FILE_FALLBACK_CHARGE);
>> +               } else if (pages > 1) {
>> +                       if (pages == HPAGE_PMD_NR) {
>> +                               count_vm_event(THP_FILE_FALLBACK);
>> +                               count_vm_event(THP_FILE_FALLBACK_CHARGE);
>> +                       }
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK);
>> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE);
>> +#endif
> 
> As above.
> 
>>                  }
>>                  goto unlock;
>>          }
>> @@ -2178,6 +2187,9 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>>                  if (!IS_ERR(folio)) {
>>                          if (folio_test_pmd_mappable(folio))
>>                                  count_vm_event(THP_FILE_ALLOC);
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC);
>> +#endif
> 
> As above.
> 
> Perhaps we need to define MTHP_STAT_FILE_ALLOC and friends in the
> same way as THP_FILE_ALLOC, i.e. as '({ BUILD_BUG(); 0; })' when
> CONFIG_TRANSPARENT_HUGEPAGE is not defined.
> 
> Something like:
> 
> #ifndef CONFIG_TRANSPARENT_HUGEPAGE
> #define MTHP_STAT_FILE_ALLOC ({ BUILD_BUG(); 0; })
> ...
> #endif

That alone is not enough: we would also need to define a dummy
count_mthp_stat() function for the case where CONFIG_TRANSPARENT_HUGEPAGE
is not enabled. I was also hesitant about doing this before, but adding
the #ifdef guards seems relatively simple :)
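
Just to illustrate, the dummy would be something like this in
include/linux/huge_mm.h (a sketch only, assuming enum mthp_stat_item
stays visible when CONFIG_TRANSPARENT_HUGEPAGE is off):

#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
/* No-op stub so shmem.c would not need #ifdef around count_mthp_stat(). */
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

which is why the small #ifdef blocks in shmem.c looked simpler to me.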

Thanks for reviewing.

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b5339210268d..e162498fef82 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -281,6 +281,9 @@  enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
 	MTHP_STAT_ANON_SWPOUT,
 	MTHP_STAT_ANON_SWPOUT_FALLBACK,
+	MTHP_STAT_FILE_ALLOC,
+	MTHP_STAT_FILE_FALLBACK,
+	MTHP_STAT_FILE_FALLBACK_CHARGE,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3080a8843f2..fcda6ae604f6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -555,6 +555,9 @@  DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
 DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
+DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
@@ -562,6 +565,9 @@  static struct attribute *stats_attrs[] = {
 	&anon_fault_fallback_charge_attr.attr,
 	&anon_swpout_attr.attr,
 	&anon_swpout_fallback_attr.attr,
+	&file_alloc_attr.attr,
+	&file_fallback_attr.attr,
+	&file_fallback_charge_attr.attr,
 	NULL,
 };
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 8b020ff09c72..fd2cb2e73a21 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1786,6 +1786,9 @@  static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 
 			if (pages == HPAGE_PMD_NR)
 				count_vm_event(THP_FILE_FALLBACK);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK);
+#endif
 			order = next_order(&suitable_orders, order);
 		}
 	} else {
@@ -1805,9 +1808,15 @@  static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 		if (xa_find(&mapping->i_pages, &index,
 				index + pages - 1, XA_PRESENT)) {
 			error = -EEXIST;
-		} else if (pages == HPAGE_PMD_NR) {
-			count_vm_event(THP_FILE_FALLBACK);
-			count_vm_event(THP_FILE_FALLBACK_CHARGE);
+		} else if (pages > 1) {
+			if (pages == HPAGE_PMD_NR) {
+				count_vm_event(THP_FILE_FALLBACK);
+				count_vm_event(THP_FILE_FALLBACK_CHARGE);
+			}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK);
+			count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE);
+#endif
 		}
 		goto unlock;
 	}
@@ -2178,6 +2187,9 @@  static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 		if (!IS_ERR(folio)) {
 			if (folio_test_pmd_mappable(folio))
 				count_vm_event(THP_FILE_ALLOC);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC);
+#endif
 			goto alloced;
 		}
 		if (PTR_ERR(folio) == -EEXIST)