arm64: mte: Ensure the cleared tags are visible before setting the PTE

Message ID 20220513100809.2324920-1-catalin.marinas@arm.com (mailing list archive)
State New, archived
Series arm64: mte: Ensure the cleared tags are visible before setting the PTE

Commit Message

Catalin Marinas May 13, 2022, 10:08 a.m. UTC
As an optimisation, only pages mapped with PROT_MTE in user space have
their MTE tags zeroed. This is done lazily at set_pte_at() time via
mte_sync_tags(). However, this function is missing a barrier, so another
CPU may observe the updated PTE before the zeroed tags are visible. Add
an smp_wmb() barrier if the page tags have been updated.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Fixes: 34bfeea4a9e9 ("arm64: mte: Clear the tags when a page is mapped in user-space with PROT_MTE")
Cc: <stable@vger.kernel.org> # 5.10.x
Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/mte.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
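
For context, the ordering problem being fixed, sketched as pseudo-C (an
illustrative sketch only, not the literal caller code; access_via() is a
hypothetical stand-in for a tagged memory access through the new mapping):

	/* CPU 0: first PROT_MTE fault on the page, inside set_pte_at() */
	mte_clear_page_tags(page_address(page));	/* zero the tags */
	smp_wmb();		/* the added barrier: tags before PTE */
	set_pte(ptep, pte);	/* publish the PTE */

	/* CPU 1: races with CPU 0 */
	pte = READ_ONCE(*ptep);	/* may observe the valid PTE ... */
	access_via(pte);	/* ... yet, without the barrier, possibly
				   before the zeroed tags reach this CPU,
				   risking a spurious tag check fault */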

Comments

Steven Price May 13, 2022, 10:50 a.m. UTC | #1
On 13/05/2022 11:08, Catalin Marinas wrote:
> [...]

Reviewed-by: Steven Price <steven.price@arm.com>

Vincenzo Frascino May 17, 2022, 7:12 a.m. UTC | #2
On 5/13/22 11:08, Catalin Marinas wrote:
> [...]

Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>

Vladimir Murzin May 17, 2022, 9:22 a.m. UTC | #3
On 5/13/22 11:08, Catalin Marinas wrote:
> [...]

Tested-by: Vladimir Murzin <vladimir.murzin@arm.com>
Catalin Marinas May 17, 2022, 9:30 a.m. UTC | #5
On Tue, May 17, 2022 at 10:22:32AM +0100, Vladimir Murzin wrote:
> On 5/13/22 11:08, Catalin Marinas wrote:
> > [...]
> > +
> > +	/* ensure the tags are visible before the PTE is set */
> > +	if (updated)
> > +		smp_wmb();
> >  }
> 
> Tested-by: Vladimir Murzin <vladimir.murzin@arm.com>

Thanks Vladimir. Having talked to Will earlier, I'll make the smp_wmb()
unconditional; it's not worth the extra checks. I'll post the updated
version soon, but I won't add your Tested-by as the patch is slightly
different (or we add a Tested-old-version-by... ;)).
Vladimir Murzin May 17, 2022, 9:38 a.m. UTC | #6
On 5/17/22 10:30, Catalin Marinas wrote:
> On Tue, May 17, 2022 at 10:22:32AM +0100, Vladimir Murzin wrote:
>> On 5/13/22 11:08, Catalin Marinas wrote:
>>> [...]
>>
>> Tested-by: Vladimir Murzin <vladimir.murzin@arm.com>
> 
> Thanks Vladimir. Having talked to Will earlier, I'll make the smp_wmb()
> unconditional; it's not worth the extra checks. I'll post the updated
> version soon, but I won't add your Tested-by as the patch is slightly
> different (or we add a Tested-old-version-by... ;)).
> 

You can carry my Tested-by over to the unconditional smp_wmb() version, since
I tested it in parallel with your patch ;)

Cheers
Vladimir
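
For reference, the unconditional variant discussed above would presumably look
something like the following, with mte_sync_page_tags() reverting to void since
its return value would no longer be needed (a sketch reconstructed from the
posted hunks, not the actual v2; lines outside the hunks are assumed):

	void mte_sync_tags(pte_t old_pte, pte_t pte)
	{
		struct page *page = pte_page(pte);
		long i, nr_pages = compound_nr(page);
		bool check_swap = nr_pages == 1;
		bool pte_is_tagged = pte_tagged(pte);

		/* Early out if there's nothing to do */
		if (!check_swap && !pte_is_tagged)
			return;

		/* if PG_mte_tagged is set, tags have already been initialised */
		for (i = 0; i < nr_pages; i++, page++) {
			if (!test_and_set_bit(PG_mte_tagged, &page->flags))
				mte_sync_page_tags(page, old_pte, check_swap,
						   pte_is_tagged);
		}

		/* ensure the tags are visible before the PTE is set */
		smp_wmb();
	}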

Patch

diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 78b3e0f8e997..07dabd52377d 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -34,18 +34,18 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
 EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 #endif
 
-static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+static bool mte_sync_page_tags(struct page *page, pte_t old_pte,
 			       bool check_swap, bool pte_is_tagged)
 {
 	if (check_swap && is_swap_pte(old_pte)) {
 		swp_entry_t entry = pte_to_swp_entry(old_pte);
 
 		if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
-			return;
+			return true;
 	}
 
 	if (!pte_is_tagged)
-		return;
+		return false;
 
 	page_kasan_tag_reset(page);
 	/*
@@ -57,6 +57,7 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
 	 */
 	smp_wmb();
 	mte_clear_page_tags(page_address(page));
+	return true;
 }
 
 void mte_sync_tags(pte_t old_pte, pte_t pte)
@@ -65,6 +66,7 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 	long i, nr_pages = compound_nr(page);
 	bool check_swap = nr_pages == 1;
 	bool pte_is_tagged = pte_tagged(pte);
+	bool updated = false;
 
 	/* Early out if there's nothing to do */
 	if (!check_swap && !pte_is_tagged)
@@ -72,10 +74,15 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 
 	/* if PG_mte_tagged is set, tags have already been initialised */
 	for (i = 0; i < nr_pages; i++, page++) {
-		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
-			mte_sync_page_tags(page, old_pte, check_swap,
-					   pte_is_tagged);
+		if (!test_and_set_bit(PG_mte_tagged, &page->flags) &&
+		    mte_sync_page_tags(page, old_pte, check_swap,
+				       pte_is_tagged))
+			updated = true;
 	}
+
+	/* ensure the tags are visible before the PTE is set */
+	if (updated)
+		smp_wmb();
 }
 
 int memcmp_pages(struct page *page1, struct page *page2)
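
For a concrete picture of the failure mode the barrier prevents: a freshly
mapped PROT_MTE page must have its tags zeroed before another CPU can observe
the PTE, or an access from that CPU may tag-check against stale tags. A rough
userspace sketch of the scenario follows (hypothetical, not the actual
reproducer; PROT_MTE and the prctl() flags are the real arm64 interfaces, but
the timing is illustrative; build with -lpthread on an MTE-capable system):

	#include <pthread.h>
	#include <sys/mman.h>
	#include <sys/prctl.h>

	#ifndef PROT_MTE
	#define PROT_MTE 0x20		/* arm64-specific mmap(2) flag */
	#endif

	static char *p;

	static void *reader(void *unused)
	{
		/*
		 * Racing access from another CPU: if the new PTE becomes
		 * visible before the zeroed tags, this load can take a
		 * spurious tag check fault (SIGSEGV / SEGV_MTESERR).
		 */
		(void)unused;
		return (void *)(long)p[0];
	}

	int main(void)
	{
		pthread_t t;

		/* Enable synchronous tag check faults for this task */
		prctl(PR_SET_TAGGED_ADDR_CTRL,
		      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0);

		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		pthread_create(&t, NULL, reader, NULL);
		p[0] = 0;	/* race to fault the page in on this CPU */
		pthread_join(t, NULL);
		return 0;
	}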