[v5,1/2] hugetlb: arm64: add mte support

Message ID 20240913183404.3517556-1-yang@os.amperecomputing.com (mailing list archive)
State New, archived
Series [v5,1/2] hugetlb: arm64: add mte support

Commit Message

Yang Shi Sept. 13, 2024, 6:34 p.m. UTC
Enable MTE support for hugetlb.

The MTE page flags will be set on the folio only.  When copying a
hugetlb folio (for example, on CoW), the tags for all subpages will be
copied when the first subpage is copied.

When freeing a hugetlb folio, the MTE flags will be cleared.

Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
---
 arch/arm64/include/asm/hugetlb.h |  8 ++++
 arch/arm64/include/asm/mman.h    |  3 +-
 arch/arm64/include/asm/mte.h     | 67 ++++++++++++++++++++++++++++++++
 arch/arm64/kernel/hibernate.c    |  6 +++
 arch/arm64/kernel/mte.c          | 27 ++++++++++++-
 arch/arm64/kvm/guest.c           | 16 ++++++--
 arch/arm64/kvm/mmu.c             | 11 ++++++
 arch/arm64/mm/copypage.c         | 27 ++++++++++++-
 fs/hugetlbfs/inode.c             |  2 +-
 9 files changed, 159 insertions(+), 8 deletions(-)

v5: * Indentation fix and renaming per Catalin.
v4: * Addressed David's comment.
v3: * Fixed the build error when !CONFIG_ARM64_MTE.
    * Added hugetlb-folio-specific APIs for manipulating the page
      flags, per David's comment.
    * Don't assume the first page is the head page, since a huge page
      copy may start from any subpage.
v2: * Reimplemented the patch to address Catalin's comments.
    * Added test cases (patch #2) per Catalin.
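
As a rough illustration of what the series enables from userspace:
with VM_MTE_ALLOWED now set for hugetlb mappings, PROT_MTE can be
requested on MAP_HUGETLB memory. A minimal sketch, not part of the
series (patch #2 carries the real test cases); it assumes an arm64
kernel with MTE, a populated default-size hugetlb pool, hard-codes
PROT_MTE to its arm64 value, and elides most error handling:

#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PROT_MTE
#define PROT_MTE	0x20	/* arm64-specific */
#endif

int main(void)
{
	char *p;

	/* Opt this task in to synchronous MTE tag-check faults */
	prctl(PR_SET_TAGGED_ADDR_CTRL,
	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0);

	/* Failed with -EINVAL before this series */
	p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE | PROT_MTE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 1;	/* tags were cleared at first fault, so no tag fault */
	return 0;
}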

Comments

Catalin Marinas Sept. 16, 2024, 3:34 p.m. UTC | #1
On Fri, Sep 13, 2024 at 11:34:03AM -0700, Yang Shi wrote:
> Enable MTE support for hugetlb.
> 
> The MTE page flags will be set on the folio only.  When copying a
> hugetlb folio (for example, on CoW), the tags for all subpages will
> be copied when the first subpage is copied.
> 
> When freeing a hugetlb folio, the MTE flags will be cleared.
> 
> Signed-off-by: Yang Shi <yang@os.amperecomputing.com>

The patch looks fine to me:

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>

You may want to rebase and repost after the merge window.
David Hildenbrand Sept. 17, 2024, 10:36 a.m. UTC | #2
On 13.09.24 20:34, Yang Shi wrote:
> [...]
>
> +static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
> +{
> +	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
> +
> +	bool ret = test_bit(PG_mte_tagged, &folio->flags);

Nit: VM_WARN_ should come after "bool ret" ...

> +
> +	/*
> +	 * If the folio is tagged, ensure ordering with a likely subsequent
> +	 * read of the tags.
> +	 */
> +	if (ret)
> +		smp_rmb();
> +	return ret;
> +}
> +

Reviewed-by: David Hildenbrand <david@redhat.com>
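
The nit concerns declaration placement: the kernel builds with
-Wdeclaration-after-statement, so the "bool ret" declaration should
open the block and the VM_WARN_ON_ONCE() move below it. A sketch of
the reordered helper (the actual fix is deferred to the repost, per
the reply below):

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
	bool ret = test_bit(PG_mte_tagged, &folio->flags);

	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

	/*
	 * If the folio is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}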
Yang Shi Sept. 19, 2024, 7:48 a.m. UTC | #3
On 9/16/24 8:34 AM, Catalin Marinas wrote:
> On Fri, Sep 13, 2024 at 11:34:03AM -0700, Yang Shi wrote:
>> [...]
> The patch looks fine to me:
>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
>
> You may want to rebase and repost after the merge window.

Thank you. Will rebase and repost it after the merge window.
Yang Shi Sept. 19, 2024, 7:49 a.m. UTC | #4
On 9/17/24 3:36 AM, David Hildenbrand wrote:
> On 13.09.24 20:34, Yang Shi wrote:
>> [...]
>>
>> +static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
>> +{
>> +    VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
>> +
>> +    bool ret = test_bit(PG_mte_tagged, &folio->flags);
>
> Nit: VM_WARN_ should come after "bool ret" ...
>
>> +
>> +    /*
>> +     * If the folio is tagged, ensure ordering with a likely subsequent
>> +     * read of the tags.
>> +     */
>> +    if (ret)
>> +        smp_rmb();
>> +    return ret;
>> +}
>> +
>
> Reviewed-by: David Hildenbrand <david@redhat.com>

Thanks. Will fix the nit when I rebase the patch after the merge window.
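
Before the patch itself, the protocol the new helpers implement in one
place: PG_mte_lock serialises tag initialisation for the whole folio,
PG_mte_tagged publishes it, and the barriers inside the helpers order
the tag accesses around the flag updates. The producer side, as
mte_sync_tags() and sanitise_mte_tags() use it below, reduces to
roughly this sketch (tag_init_hugetlb_folio is a hypothetical name,
not a function from the series):

static void tag_init_hugetlb_folio(struct folio *folio)
{
	unsigned long i, nr = folio_nr_pages(folio);

	/*
	 * Only the first caller wins PG_mte_lock; later callers wait in
	 * smp_cond_load_acquire() until PG_mte_tagged is published.
	 */
	if (!folio_try_hugetlb_mte_tagging(folio))
		return;

	for (i = 0; i < nr; i++)
		mte_clear_page_tags(page_address(folio_page(folio, i)));

	/* The smp_wmb() inside orders the tag writes before the flag */
	folio_set_hugetlb_mte_tagged(folio);
}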

Patch

diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 293f880865e8..c6dff3e69539 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -11,6 +11,7 @@
 #define __ASM_HUGETLB_H
 
 #include <asm/cacheflush.h>
+#include <asm/mte.h>
 #include <asm/page.h>
 
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -21,6 +22,13 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
 	clear_bit(PG_dcache_clean, &folio->flags);
+
+#ifdef CONFIG_ARM64_MTE
+	if (system_supports_mte()) {
+		clear_bit(PG_mte_tagged, &folio->flags);
+		clear_bit(PG_mte_lock, &folio->flags);
+	}
+#endif
 }
 #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
 
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..304dfc499e68 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -28,7 +28,8 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 	 * backed by tags-capable memory. The vm_flags may be overridden by a
 	 * filesystem supporting MTE (RAM-based).
 	 */
-	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+	if (system_supports_mte() &&
+	    (flags & (MAP_ANONYMOUS | MAP_HUGETLB)))
 		return VM_MTE_ALLOWED;
 
 	return 0;
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 0f84518632b4..03dc43636aba 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -41,6 +41,8 @@ void mte_free_tag_storage(char *storage);
 
 static inline void set_page_mte_tagged(struct page *page)
 {
+	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
 	/*
 	 * Ensure that the tags written prior to this function are visible
 	 * before the page flags update.
@@ -51,6 +53,8 @@ static inline void set_page_mte_tagged(struct page *page)
 
 static inline bool page_mte_tagged(struct page *page)
 {
+	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
 	bool ret = test_bit(PG_mte_tagged, &page->flags);
 
 	/*
@@ -76,6 +80,8 @@ static inline bool page_mte_tagged(struct page *page)
  */
 static inline bool try_page_mte_tagging(struct page *page)
 {
+	VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
+
 	if (!test_and_set_bit(PG_mte_lock, &page->flags))
 		return true;
 
@@ -157,6 +163,67 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
 
 #endif /* CONFIG_ARM64_MTE */
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+	/*
+	 * Ensure that the tags written prior to this function are visible
+	 * before the folio flags update.
+	 */
+	smp_wmb();
+	set_bit(PG_mte_tagged, &folio->flags);
+
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+	bool ret = test_bit(PG_mte_tagged, &folio->flags);
+
+	/*
+	 * If the folio is tagged, ensure ordering with a likely subsequent
+	 * read of the tags.
+	 */
+	if (ret)
+		smp_rmb();
+	return ret;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+	VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
+
+	if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+		return true;
+
+	/*
+	 * The tags are either being initialised or may have been initialised
+	 * already. Check if the PG_mte_tagged flag has been set or wait
+	 * otherwise.
+	 */
+	smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+
+	return false;
+}
+#else
+static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
+{
+}
+
+static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
+{
+	return false;
+}
+
+static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
+{
+	return false;
+}
+#endif
+
 static inline void mte_disable_tco_entry(struct task_struct *task)
 {
 	if (!system_supports_mte())
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 02870beb271e..74626d5cdf1d 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -266,9 +266,15 @@ static int swsusp_mte_save_tags(void)
 		max_zone_pfn = zone_end_pfn(zone);
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
 			struct page *page = pfn_to_online_page(pfn);
+			struct folio *folio;
 
 			if (!page)
 				continue;
+			folio = page_folio(page);
+
+			if (folio_test_hugetlb(folio) &&
+			    !folio_test_hugetlb_mte_tagged(folio))
+				continue;
 
 			if (!page_mte_tagged(page))
 				continue;
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 6174671be7c1..2fbfd27ff5f2 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -38,7 +38,24 @@ EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 void mte_sync_tags(pte_t pte, unsigned int nr_pages)
 {
 	struct page *page = pte_page(pte);
-	unsigned int i;
+	struct folio *folio = page_folio(page);
+	unsigned long i;
+
+	if (folio_test_hugetlb(folio)) {
+		unsigned long nr = folio_nr_pages(folio);
+
+		/* Hugetlb MTE flags are set for head page only */
+		if (folio_try_hugetlb_mte_tagging(folio)) {
+			for (i = 0; i < nr; i++, page++)
+				mte_clear_page_tags(page_address(page));
+			folio_set_hugetlb_mte_tagged(folio);
+		}
+
+		/* ensure the tags are visible before the PTE is set */
+		smp_wmb();
+
+		return;
+	}
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
 	for (i = 0; i < nr_pages; i++, page++) {
@@ -410,6 +427,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 		void *maddr;
 		struct page *page = get_user_page_vma_remote(mm, addr,
 							     gup_flags, &vma);
+		struct folio *folio;
 
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
@@ -428,7 +446,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 			put_page(page);
 			break;
 		}
-		WARN_ON_ONCE(!page_mte_tagged(page));
+
+		folio = page_folio(page);
+		if (folio_test_hugetlb(folio))
+			WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
+		else
+			WARN_ON_ONCE(!page_mte_tagged(page));
 
 		/* limit access to the end of the page */
 		offset = offset_in_page(addr);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 11098eb7eb44..ed9b3f31f3ab 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1050,6 +1050,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 		void *maddr;
 		unsigned long num_tags;
 		struct page *page;
+		struct folio *folio;
 
 		if (is_error_noslot_pfn(pfn)) {
 			ret = -EFAULT;
@@ -1062,10 +1063,13 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 			ret = -EFAULT;
 			goto out;
 		}
+		folio = page_folio(page);
 		maddr = page_address(page);
 
 		if (!write) {
-			if (page_mte_tagged(page))
+			if ((folio_test_hugetlb(folio) &&
+			     folio_test_hugetlb_mte_tagged(folio)) ||
+			     page_mte_tagged(page))
 				num_tags = mte_copy_tags_to_user(tags, maddr,
 							MTE_GRANULES_PER_PAGE);
 			else
@@ -1079,14 +1083,20 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 			 * __set_ptes() in the VMM but still overriding the
 			 * tags, hence ignoring the return value.
 			 */
-			try_page_mte_tagging(page);
+			if (folio_test_hugetlb(folio))
+				folio_try_hugetlb_mte_tagging(folio);
+			else
+				try_page_mte_tagging(page);
 			num_tags = mte_copy_tags_from_user(maddr, tags,
 							MTE_GRANULES_PER_PAGE);
 
 			/* uaccess failed, don't leave stale tags */
 			if (num_tags != MTE_GRANULES_PER_PAGE)
 				mte_clear_page_tags(maddr);
-			set_page_mte_tagged(page);
+			if (folio_test_hugetlb(folio))
+				folio_set_hugetlb_mte_tagged(folio);
+			else
+				set_page_mte_tagged(page);
 
 			kvm_release_pfn_dirty(pfn);
 		}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a509b63bd4dd..962449f9ac2f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1401,10 +1401,21 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 {
 	unsigned long i, nr_pages = size >> PAGE_SHIFT;
 	struct page *page = pfn_to_page(pfn);
+	struct folio *folio = page_folio(page);
 
 	if (!kvm_has_mte(kvm))
 		return;
 
+	if (folio_test_hugetlb(folio)) {
+		/* Hugetlb has MTE flags set on head page only */
+		if (folio_try_hugetlb_mte_tagging(folio)) {
+			for (i = 0; i < nr_pages; i++, page++)
+				mte_clear_page_tags(page_address(page));
+			folio_set_hugetlb_mte_tagged(folio);
+		}
+		return;
+	}
+
 	for (i = 0; i < nr_pages; i++, page++) {
 		if (try_page_mte_tagging(page)) {
 			mte_clear_page_tags(page_address(page));
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index a7bb20055ce0..87b3f1a25535 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -18,15 +18,40 @@ void copy_highpage(struct page *to, struct page *from)
 {
 	void *kto = page_address(to);
 	void *kfrom = page_address(from);
+	struct folio *src = page_folio(from);
+	struct folio *dst = page_folio(to);
+	unsigned int i, nr_pages;
 
 	copy_page(kto, kfrom);
 
 	if (kasan_hw_tags_enabled())
 		page_kasan_tag_reset(to);
 
-	if (system_supports_mte() && page_mte_tagged(from)) {
+	if (!system_supports_mte())
+		return;
+
+	if (folio_test_hugetlb(src) &&
+	    folio_test_hugetlb_mte_tagged(src)) {
+		if (!folio_try_hugetlb_mte_tagging(dst))
+			return;
+
+		/*
+		 * Populate tags for all subpages.
+		 *
+		 * Don't assume the first page is head page since
+		 * huge page copy may start from any subpage.
+		 */
+		nr_pages = folio_nr_pages(src);
+		for (i = 0; i < nr_pages; i++) {
+			kfrom = page_address(folio_page(src, i));
+			kto = page_address(folio_page(dst, i));
+			mte_copy_page_tags(kto, kfrom);
+		}
+		folio_set_hugetlb_mte_tagged(dst);
+	} else if (page_mte_tagged(from)) {
 		/* It's a new page, shouldn't have been tagged yet */
 		WARN_ON_ONCE(!try_page_mte_tagging(to));
+
 		mte_copy_page_tags(kto, kfrom);
 		set_page_mte_tagged(to);
 	}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9f6cff356796..f944e8e7126b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 * way when do_mmap unwinds (may be important on powerpc
 	 * and ia64).
 	 */
-	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
+	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
 	vma->vm_ops = &hugetlb_vm_ops;
 
 	ret = seal_check_write(info->seals, vma);
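
The consumer side of the same protocol is symmetrical: each reader
(hibernation, ptrace tag access, the KVM tag copy) checks
folio_test_hugetlb_mte_tagged() on the folio before touching any
subpage's tags, relying on the smp_rmb() inside to pair with the
producer's smp_wmb(). A condensed sketch; save_page_tags() is a
hypothetical stand-in for the per-page work the callers above do:

static void save_folio_tags(struct folio *folio)
{
	unsigned long i, nr = folio_nr_pages(folio);

	/* Pairs with the smp_wmb() in folio_set_hugetlb_mte_tagged() */
	if (!folio_test_hugetlb_mte_tagged(folio))
		return;

	for (i = 0; i < nr; i++)
		save_page_tags(folio_page(folio, i));	/* hypothetical */
}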