[v2,4/4] arm64: kasan: Revert "arm64: mte: reset the page tag in page->flags"

Message ID: 20220610152141.2148929-5-catalin.marinas@arm.com
Series: kasan: Fix ordering between MTE tag colouring and page->flags

Commit Message

Catalin Marinas June 10, 2022, 3:21 p.m. UTC
This reverts commit e5b8d9218951e59df986f627ec93569a0d22149b.

Pages mapped in user-space with PROT_MTE have the allocation tags either
zeroed or copied/restored to some user values. In order for the kernel
to access such pages via page_address(), resetting the tag in
page->flags was necessary. This tag resetting was deferred to
set_pte_at() -> mte_sync_page_tags() but it can race with another CPU
reading the flags (via page_to_virt()):

P0 (mte_sync_page_tags):	P1 (memcpy from virt_to_page):
				  Rflags!=0xff
  Wflags=0xff
  DMB (doesn't help)
  Wtags=0
				  Rtags=0   // fault
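
To make the interleaving above concrete, here is a rough C sketch of
the two sides, annotated with the Wflags/Wtags/Rflags/Rtags steps from
the diagram. This is only a sketch of the pre-revert logic, with
made-up p0_/p1_ wrapper names, not the exact kernel code:

  #include <linux/mm.h>
  #include <linux/string.h>
  #include <asm/mte.h>

  /* P0: what mte_sync_page_tags() did before this revert. */
  static void p0_reset_and_clear(struct page *page)
  {
          page_kasan_tag_reset(page);              /* Wflags = 0xff */
          /*
           * smp_wmb() (a DMB on arm64) orders P0's two stores against
           * each other, but P1's two reads have no matching barrier,
           * so the interleaving above remains possible.
           */
          smp_wmb();
          mte_clear_page_tags(page_address(page)); /* Wtags = 0 */
  }

  /* P1: a concurrent kernel access to the same page. */
  static void p1_copy_from_page(struct page *page, void *buf, size_t size)
  {
          /*
           * Rflags: page_to_virt() derives the pointer tag from the
           * KASAN tag bits in page->flags; running before P0's Wflags
           * it returns an address carrying the old, non-0xff tag.
           */
          void *addr = page_to_virt(page);
          /*
           * Rtags: the copy can still execute after P0's Wtags, so the
           * stale pointer tag is checked against the freshly zeroed
           * allocation tags and the access faults.
           */
          memcpy(buf, addr, size);
  }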

Now that post_alloc_hook() resets the page->flags tag when unpoisoning
is skipped for user pages (including the __GFP_ZEROTAGS case), revert
the arm64 commit that called page_kasan_tag_reset().
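
For reference, a simplified sketch of the ordering now established at
allocation time; skip_kasan_unpoison() is a made-up stand-in for the
real check in mm/page_alloc.c and the function below is not the exact
post_alloc_hook() code:

  #include <linux/gfp.h>
  #include <linux/kasan.h>
  #include <linux/mm.h>

  /* Hypothetical stand-in for the real "skip unpoisoning?" check. */
  static bool skip_kasan_unpoison(gfp_t gfp_flags);

  static void post_alloc_hook_sketch(struct page *page,
                                     unsigned int order, gfp_t gfp_flags)
  {
          unsigned int i;

          if (!skip_kasan_unpoison(gfp_flags)) {
                  /* Kernel allocation: unpoison (and tag) as usual. */
                  kasan_unpoison_pages(page, order, false);
          } else {
                  /*
                   * User page (including __GFP_ZEROTAGS): unpoisoning
                   * is skipped, so reset the KASAN tag in page->flags
                   * here, before the page is handed out and can be
                   * mapped, where no reader can race with the reset.
                   */
                  for (i = 0; i != 1U << order; i++)
                          page_kasan_tag_reset(page + i);
          }
  }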

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Peter Collingbourne <pcc@google.com>
---
 arch/arm64/kernel/hibernate.c | 5 -----
 arch/arm64/kernel/mte.c       | 9 ---------
 arch/arm64/mm/copypage.c      | 9 ---------
 arch/arm64/mm/mteswap.c       | 9 ---------
 4 files changed, 32 deletions(-)

Comments

Andrey Konovalov June 11, 2022, 7:40 p.m. UTC | #1
On Fri, Jun 10, 2022 at 5:21 PM Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> This reverts commit e5b8d9218951e59df986f627ec93569a0d22149b.
>
> [...]

Acked-by: Andrey Konovalov <andreyknvl@gmail.com>
Vincenzo Frascino June 16, 2022, 8:44 a.m. UTC | #2
On 6/10/22 16:21, Catalin Marinas wrote:
> This reverts commit e5b8d9218951e59df986f627ec93569a0d22149b.
>
> [...]

Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>


Patch

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 2e248342476e..af5df48ba915 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -300,11 +300,6 @@ static void swsusp_mte_restore_tags(void)
 		unsigned long pfn = xa_state.xa_index;
 		struct page *page = pfn_to_online_page(pfn);
 
-		/*
-		 * It is not required to invoke page_kasan_tag_reset(page)
-		 * at this point since the tags stored in page->flags are
-		 * already restored.
-		 */
 		mte_restore_page_tags(page_address(page), tags);
 
 		mte_free_tag_storage(tags);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 57b30bcf9f21..7ba4d6fd1f72 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -48,15 +48,6 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
 	if (!pte_is_tagged)
 		return;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_clear_page_tags(page_address(page));
 }
 
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0dea80bf6de4..24913271e898 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)
 
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
-		page_kasan_tag_reset(to);
-		/*
-		 * We need smp_wmb() in between setting the flags and clearing the
-		 * tags because if another thread reads page->flags and builds a
-		 * tagged address out of it, there is an actual dependency to the
-		 * memory access, but on the current thread we do not guarantee that
-		 * the new page->flags are visible before the tags were updated.
-		 */
-		smp_wmb();
 		mte_copy_page_tags(kto, kfrom);
 	}
 }
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index a9e50e930484..4334dec93bd4 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return false;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_restore_page_tags(page_address(page), tags);
 
 	return true;