
[20/35] arm64: mte: Add in-kernel MTE helpers

Message ID 2cf260bdc20793419e32240d2a3e692b0adf1f80.1597425745.git.andreyknvl@google.com (mailing list archive)
State New, archived
Headers show
Series kasan: add hardware tag-based mode for arm64

Commit Message

Andrey Konovalov Aug. 14, 2020, 5:27 p.m. UTC
From: Vincenzo Frascino <vincenzo.frascino@arm.com>

Provide helper functions to manipulate allocation and pointer tags for
kernel addresses.

Low-level helper functions (mte_assign_*, written in assembly) operate on
tag values in the [0x0, 0xF] range. High-level helper functions
(mte_get/set_*) use the [0xF0, 0xFF] range to preserve compatibility with
normal kernel pointers, which have 0xFF in their top byte.

The MTE_GRANULE_SIZE definition is moved to the mte_asm.h header, which has
no dependencies and is safe to include from any low-level header.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Co-developed-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 arch/arm64/include/asm/esr.h     |  1 +
 arch/arm64/include/asm/mte.h     | 46 +++++++++++++++++++++++++++++---
 arch/arm64/include/asm/mte_asm.h | 10 +++++++
 arch/arm64/kernel/mte.c          | 43 +++++++++++++++++++++++++++++
 arch/arm64/lib/mte.S             | 41 ++++++++++++++++++++++++++++
 5 files changed, 138 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/include/asm/mte_asm.h

Comments

Catalin Marinas Aug. 27, 2020, 9:38 a.m. UTC | #1
On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> index 1c99fcadb58c..733be1cb5c95 100644
> --- a/arch/arm64/include/asm/mte.h
> +++ b/arch/arm64/include/asm/mte.h
> @@ -5,14 +5,19 @@
>  #ifndef __ASM_MTE_H
>  #define __ASM_MTE_H
>  
> -#define MTE_GRANULE_SIZE	UL(16)
> +#include <asm/mte_asm.h>

So the reason for this move is to include it in asm/cache.h. Fine by
me but...

>  #define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
>  #define MTE_TAG_SHIFT		56
>  #define MTE_TAG_SIZE		4
> +#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> +#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)

... I'd rather move all these definitions into a file with a more
meaningful name like mte-def.h. The _asm suffix implies it is meant for
inclusion in .S files, which isn't the case.

> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index eb39504e390a..e2d708b4583d 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -72,6 +74,47 @@ int memcmp_pages(struct page *page1, struct page *page2)
>  	return ret;
>  }
>  
> +u8 mte_get_mem_tag(void *addr)
> +{
> +	if (system_supports_mte())
> +		addr = mte_assign_valid_ptr_tag(addr);

The mte_assign_valid_ptr_tag() name is slightly misleading. All it does is
read the allocation tag from memory.

I also think this should be inline asm, possibly using alternatives.
It's just an LDG instruction (and it saves us from having to invent a
better function name).

> +
> +	return 0xF0 | mte_get_ptr_tag(addr);
> +}
> +
> +u8 mte_get_random_tag(void)
> +{
> +	u8 tag = 0xF;
> +
> +	if (system_supports_mte())
> +		tag = mte_get_ptr_tag(mte_assign_random_ptr_tag(NULL));

Another alternative inline asm with an IRG instruction.
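
For illustration, a minimal sketch of what the IRG-based helper could look
like (an assumption, not part of this patch: it keeps the runtime
system_supports_mte() check rather than an alternative, and it presumes MTE
instructions are permitted in inline asm in this file):

	static inline u8 mte_get_random_tag(void)
	{
		void *addr = NULL;

		if (!system_supports_mte())
			return 0xFF;

		/* IRG inserts a random allocation tag into the register. */
		asm volatile("irg %0, %0" : "+r" (addr));

		return 0xF0 | mte_get_ptr_tag(addr);
	}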

> +
> +	return 0xF0 | tag;
> +}
> +
> +void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +	void *ptr = addr;
> +
> +	if ((!system_supports_mte()) || (size == 0))
> +		return addr;
> +
> +	tag = 0xF0 | (tag & 0xF);
> +	ptr = (void *)__tag_set(ptr, tag);
> +	size = ALIGN(size, MTE_GRANULE_SIZE);

I think aligning the size is dangerous. Can we instead turn it into a
WARN_ON if not already aligned? At a quick look, the callers of
kasan_{un,}poison_memory() already align the size.
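
For illustration, a minimal sketch of the suggested check (the behaviour on
a misaligned size is an assumption here; the thread only asks for a warning):

	/*
	 * Callers are expected to pass an MTE_GRANULE_SIZE-aligned size
	 * already, so warn instead of silently rounding up.
	 */
	if (WARN_ON(!IS_ALIGNED(size, MTE_GRANULE_SIZE)))
		return addr;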

> +
> +	mte_assign_mem_tag_range(ptr, size);
> +
> +	/*
> +	 * mte_assign_mem_tag_range() can be invoked in a multi-threaded
> +	 * context, ensure that tags are written in memory before the
> +	 * reference is used.
> +	 */
> +	smp_wmb();
> +
> +	return ptr;

I'm not sure I understand the barrier here. It ensures the relative
ordering of memory (or tag) accesses on a CPU as observed by other CPUs.
While the first access here is setting the tag, I can't see what other
access on _this_ CPU it is ordered with.

> +}
> +
>  static void update_sctlr_el1_tcf0(u64 tcf0)
>  {
>  	/* ISB required for the kernel uaccess routines */
> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
> index 03ca6d8b8670..8c743540e32c 100644
> --- a/arch/arm64/lib/mte.S
> +++ b/arch/arm64/lib/mte.S
> @@ -149,3 +149,44 @@ SYM_FUNC_START(mte_restore_page_tags)
>  
>  	ret
>  SYM_FUNC_END(mte_restore_page_tags)
> +
> +/*
> + * Assign pointer tag based on the allocation tag
> + *   x0 - source pointer
> + * Returns:
> + *   x0 - pointer with the correct tag to access memory
> + */
> +SYM_FUNC_START(mte_assign_valid_ptr_tag)
> +	ldg	x0, [x0]
> +	ret
> +SYM_FUNC_END(mte_assign_valid_ptr_tag)
> +
> +/*
> + * Assign random pointer tag
> + *   x0 - source pointer
> + * Returns:
> + *   x0 - pointer with a random tag
> + */
> +SYM_FUNC_START(mte_assign_random_ptr_tag)
> +	irg	x0, x0
> +	ret
> +SYM_FUNC_END(mte_assign_random_ptr_tag)

As I said above, these two can be inline asm.

> +
> +/*
> + * Assign allocation tags for a region of memory based on the pointer tag
> + *   x0 - source pointer
> + *   x1 - size
> + *
> + * Note: size is expected to be MTE_GRANULE_SIZE aligned
> + */
> +SYM_FUNC_START(mte_assign_mem_tag_range)
> +	/* if (src == NULL) return; */
> +	cbz	x0, 2f
> +	/* if (size == 0) return; */

You could skip the cbz here and just document that the size should be
non-zero and aligned. The caller already takes care of this check.

> +	cbz	x1, 2f
> +1:	stg	x0, [x0]
> +	add	x0, x0, #MTE_GRANULE_SIZE
> +	sub	x1, x1, #MTE_GRANULE_SIZE
> +	cbnz	x1, 1b
> +2:	ret
> +SYM_FUNC_END(mte_assign_mem_tag_range)
Vincenzo Frascino Aug. 27, 2020, 10:31 a.m. UTC | #2
Hi Catalin,

On 8/27/20 10:38 AM, Catalin Marinas wrote:
> On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
>> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
>> index 1c99fcadb58c..733be1cb5c95 100644
>> --- a/arch/arm64/include/asm/mte.h
>> +++ b/arch/arm64/include/asm/mte.h
>> @@ -5,14 +5,19 @@
>>  #ifndef __ASM_MTE_H
>>  #define __ASM_MTE_H
>>  
>> -#define MTE_GRANULE_SIZE	UL(16)
>> +#include <asm/mte_asm.h>
> 
> So the reason for this move is to include it in asm/cache.h. Fine by
> me but...
> 
>>  #define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
>>  #define MTE_TAG_SHIFT		56
>>  #define MTE_TAG_SIZE		4
>> +#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
>> +#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
> 
> ... I'd rather move all these definitions in a file with a more
> meaningful name like mte-def.h. The _asm implies being meant for .S
> files inclusion which isn't the case.
> 

mte_asm.h was originally called mte_helper.h, hence it made sense to have
these defines there. But I agree with your proposal: it makes things more
readable and is in line with the rest of the arm64 code (e.g. page-def.h).

We should also update the commit message accordingly.

>> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
>> index eb39504e390a..e2d708b4583d 100644
>> --- a/arch/arm64/kernel/mte.c
>> +++ b/arch/arm64/kernel/mte.c
>> @@ -72,6 +74,47 @@ int memcmp_pages(struct page *page1, struct page *page2)
>>  	return ret;
>>  }
>>  
>> +u8 mte_get_mem_tag(void *addr)
>> +{
>> +	if (system_supports_mte())
>> +		addr = mte_assign_valid_ptr_tag(addr);
> 
> The mte_assign_valid_ptr_tag() is slightly misleading. All it does is
> read the allocation tag from memory.
> 
> I also think this should be inline asm, possibly using alternatives.
> It's just an LDG instruction (and it saves us from having to invent a
> better function name).
> 

Yes, I agree. I implemented this code in the early days and never got around
to refactoring it.

>> +
>> +	return 0xF0 | mte_get_ptr_tag(addr);
>> +}
>> +
>> +u8 mte_get_random_tag(void)
>> +{
>> +	u8 tag = 0xF;
>> +
>> +	if (system_supports_mte())
>> +		tag = mte_get_ptr_tag(mte_assign_random_ptr_tag(NULL));
> 
> Another alternative inline asm with an IRG instruction.
> 

As per above.

>> +
>> +	return 0xF0 | tag;
>> +}
>> +
>> +void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>> +{
>> +	void *ptr = addr;
>> +
>> +	if ((!system_supports_mte()) || (size == 0))
>> +		return addr;
>> +
>> +	tag = 0xF0 | (tag & 0xF);
>> +	ptr = (void *)__tag_set(ptr, tag);
>> +	size = ALIGN(size, MTE_GRANULE_SIZE);
> 
> I think aligning the size is dangerous. Can we instead turn it into a
> WARN_ON if not already aligned? At a quick look, the callers of
> kasan_{un,}poison_memory() already align the size.
> 

The size here is used only for tagging purposes, and if we want to tag a
sub-granule amount of memory we end up tagging the whole granule anyway. Why
do you think it can be dangerous?

Anyway, I agree that it seems redundant; a WARN_ON here should be
sufficient.

>> +
>> +	mte_assign_mem_tag_range(ptr, size);
>> +
>> +	/*
>> +	 * mte_assign_mem_tag_range() can be invoked in a multi-threaded
>> +	 * context, ensure that tags are written in memory before the
>> +	 * reference is used.
>> +	 */
>> +	smp_wmb();
>> +
>> +	return ptr;
> 
> I'm not sure I understand the barrier here. It ensures the relative
> ordering of memory (or tag) accesses on a CPU as observed by other CPUs.
> While the first access here is setting the tag, I can't see what other
> access on _this_ CPU it is ordered with.
> 

You are right it can be removed. I was just overthinking here.

>> +}
>> +
>>  static void update_sctlr_el1_tcf0(u64 tcf0)
>>  {
>>  	/* ISB required for the kernel uaccess routines */
>> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
>> index 03ca6d8b8670..8c743540e32c 100644
>> --- a/arch/arm64/lib/mte.S
>> +++ b/arch/arm64/lib/mte.S
>> @@ -149,3 +149,44 @@ SYM_FUNC_START(mte_restore_page_tags)
>>  
>>  	ret
>>  SYM_FUNC_END(mte_restore_page_tags)
>> +
>> +/*
>> + * Assign pointer tag based on the allocation tag
>> + *   x0 - source pointer
>> + * Returns:
>> + *   x0 - pointer with the correct tag to access memory
>> + */
>> +SYM_FUNC_START(mte_assign_valid_ptr_tag)
>> +	ldg	x0, [x0]
>> +	ret
>> +SYM_FUNC_END(mte_assign_valid_ptr_tag)
>> +
>> +/*
>> + * Assign random pointer tag
>> + *   x0 - source pointer
>> + * Returns:
>> + *   x0 - pointer with a random tag
>> + */
>> +SYM_FUNC_START(mte_assign_random_ptr_tag)
>> +	irg	x0, x0
>> +	ret
>> +SYM_FUNC_END(mte_assign_random_ptr_tag)
> 
> As I said above, these two can be inline asm.
> 

Agreed.

>> +
>> +/*
>> + * Assign allocation tags for a region of memory based on the pointer tag
>> + *   x0 - source pointer
>> + *   x1 - size
>> + *
>> + * Note: size is expected to be MTE_GRANULE_SIZE aligned
>> + */
>> +SYM_FUNC_START(mte_assign_mem_tag_range)
>> +	/* if (src == NULL) return; */
>> +	cbz	x0, 2f
>> +	/* if (size == 0) return; */
> 
> You could skip the cbz here and just document that the size should be
> non-zero and aligned. The caller already takes care of this check.
>

I would prefer to keep the check here, unless there is a valid reason not
to, since allocate(0) is a viable option and hence tag(x, 0) should be as
well. The caller takes care of it in one place today, but I do not know
where the API will be used in the future.

>> +	cbz	x1, 2f
>> +1:	stg	x0, [x0]
>> +	add	x0, x0, #MTE_GRANULE_SIZE
>> +	sub	x1, x1, #MTE_GRANULE_SIZE
>> +	cbnz	x1, 1b
>> +2:	ret
>> +SYM_FUNC_END(mte_assign_mem_tag_range)
>
Catalin Marinas Aug. 27, 2020, 11:10 a.m. UTC | #3
On Thu, Aug 27, 2020 at 11:31:56AM +0100, Vincenzo Frascino wrote:
> On 8/27/20 10:38 AM, Catalin Marinas wrote:
> > On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
> >> +void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> >> +{
> >> +	void *ptr = addr;
> >> +
> >> +	if ((!system_supports_mte()) || (size == 0))
> >> +		return addr;
> >> +
> >> +	tag = 0xF0 | (tag & 0xF);
> >> +	ptr = (void *)__tag_set(ptr, tag);
> >> +	size = ALIGN(size, MTE_GRANULE_SIZE);
> > 
> > I think aligning the size is dangerous. Can we instead turn it into a
> > WARN_ON if not already aligned? At a quick look, the callers of
> > kasan_{un,}poison_memory() already align the size.
> 
> The size here is used only for tagging purposes and if we want to tag a
> subgranule amount of memory we end up tagging the granule anyway. Why do you
> think it can be dangerous?

In principle, I don't like expanding the size unless you are an
allocator. Since this code doesn't control the placement of the object
it was given, a warn seems more appropriate.

> >> +/*
> >> + * Assign allocation tags for a region of memory based on the pointer tag
> >> + *   x0 - source pointer
> >> + *   x1 - size
> >> + *
> >> + * Note: size is expected to be MTE_GRANULE_SIZE aligned
> >> + */
> >> +SYM_FUNC_START(mte_assign_mem_tag_range)
> >> +	/* if (src == NULL) return; */
> >> +	cbz	x0, 2f
> >> +	/* if (size == 0) return; */
> > 
> > You could skip the cbz here and just document that the size should be
> > non-zero and aligned. The caller already takes care of this check.
> 
> I would prefer to keep the check here, unless there is a valid reason, since
> allocate(0) is a viable option hence tag(x, 0) should be as well. The caller
> takes care of it in one place, today, but I do not know where the API will be
> used in future.

That's why I said just document it in the comment above the function.

The check is also insufficient if the size is not aligned to an MTE
granule, so it's not really consistent. This function should end with a subs
followed by a b.gt, as cbnz would get stuck in an infinite loop for an
unaligned size.
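
For illustration, a sketch of that loop tail (untested; the rest of the
routine would stay as it is):

	/*
	 * Flag-setting subtract plus signed branch: the loop terminates even
	 * if x1 goes negative on an unaligned size, where cbnz would spin.
	 */
	1:	stg	x0, [x0]
		add	x0, x0, #MTE_GRANULE_SIZE
		subs	x1, x1, #MTE_GRANULE_SIZE
		b.gt	1b
		ret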
Vincenzo Frascino Aug. 27, 2020, 11:24 a.m. UTC | #4
On 8/27/20 12:10 PM, Catalin Marinas wrote:
> On Thu, Aug 27, 2020 at 11:31:56AM +0100, Vincenzo Frascino wrote:
>> On 8/27/20 10:38 AM, Catalin Marinas wrote:
>>> On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
>>>> +void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>>>> +{
>>>> +	void *ptr = addr;
>>>> +
>>>> +	if ((!system_supports_mte()) || (size == 0))
>>>> +		return addr;
>>>> +
>>>> +	tag = 0xF0 | (tag & 0xF);
>>>> +	ptr = (void *)__tag_set(ptr, tag);
>>>> +	size = ALIGN(size, MTE_GRANULE_SIZE);
>>>
>>> I think aligning the size is dangerous. Can we instead turn it into a
>>> WARN_ON if not already aligned? At a quick look, the callers of
>>> kasan_{un,}poison_memory() already align the size.
>>
>> The size here is used only for tagging purposes and if we want to tag a
>> subgranule amount of memory we end up tagging the granule anyway. Why do you
>> think it can be dangerous?
> 
> In principle, I don't like expanding the size unless you are an
> allocator. Since this code doesn't control the placement of the object
> it was given, a warn seems more appropriate.
> 

That's a good point. OK, we can change this into a warning.

>>>> +/*
>>>> + * Assign allocation tags for a region of memory based on the pointer tag
>>>> + *   x0 - source pointer
>>>> + *   x1 - size
>>>> + *
>>>> + * Note: size is expected to be MTE_GRANULE_SIZE aligned
>>>> + */
>>>> +SYM_FUNC_START(mte_assign_mem_tag_range)
>>>> +	/* if (src == NULL) return; */
>>>> +	cbz	x0, 2f
>>>> +	/* if (size == 0) return; */
>>>
>>> You could skip the cbz here and just document that the size should be
>>> non-zero and aligned. The caller already takes care of this check.
>>
>> I would prefer to keep the check here, unless there is a valid reason, since
>> allocate(0) is a viable option hence tag(x, 0) should be as well. The caller
>> takes care of it in one place, today, but I do not know where the API will be
>> used in future.
> 
> That's why I said just document it in the comment above the function.
> 
> The check is also insufficient if the size is not aligned to an MTE
> granule, so it's not really consistent. This function should end with a
> subs followed by b.gt as cbnz will get stuck in a loop for unaligned
> size.
> 

That's correct, thanks for pointing this out. I currently used it only in
places where the caller took care to align the size, but in the future we
cannot know, hence we should harden the function as you are suggesting.
Andrey Konovalov Aug. 27, 2020, 12:46 p.m. UTC | #5
On Thu, Aug 27, 2020 at 11:38 AM Catalin Marinas
<catalin.marinas@arm.com> wrote:
>
> On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
> > diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> > index 1c99fcadb58c..733be1cb5c95 100644
> > --- a/arch/arm64/include/asm/mte.h
> > +++ b/arch/arm64/include/asm/mte.h
> > @@ -5,14 +5,19 @@
> >  #ifndef __ASM_MTE_H
> >  #define __ASM_MTE_H
> >
> > -#define MTE_GRANULE_SIZE     UL(16)
> > +#include <asm/mte_asm.h>
>
> So the reason for this move is to include it in asm/cache.h. Fine by
> me but...
>
> >  #define MTE_GRANULE_MASK     (~(MTE_GRANULE_SIZE - 1))
> >  #define MTE_TAG_SHIFT                56
> >  #define MTE_TAG_SIZE         4
> > +#define MTE_TAG_MASK         GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> > +#define MTE_TAG_MAX          (MTE_TAG_MASK >> MTE_TAG_SHIFT)
>
> ... I'd rather move all these definitions in a file with a more
> meaningful name like mte-def.h. The _asm implies being meant for .S
> files inclusion which isn't the case.

Sounds good, I'll leave fixing this and the other arm64-specific comments
to Vincenzo. I'll change the KASAN code to use mte-def.h once I have
patches where this file is renamed.

>
> > diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> > index eb39504e390a..e2d708b4583d 100644
> > --- a/arch/arm64/kernel/mte.c
> > +++ b/arch/arm64/kernel/mte.c
> > @@ -72,6 +74,47 @@ int memcmp_pages(struct page *page1, struct page *page2)
> >       return ret;
> >  }
> >
> > +u8 mte_get_mem_tag(void *addr)
> > +{
> > +     if (system_supports_mte())
> > +             addr = mte_assign_valid_ptr_tag(addr);
>
> The mte_assign_valid_ptr_tag() is slightly misleading. All it does is
> read the allocation tag from memory.
>
> I also think this should be inline asm, possibly using alternatives.
> It's just an LDG instruction (and it saves us from having to invent a
> better function name).
>
> > +
> > +     return 0xF0 | mte_get_ptr_tag(addr);
> > +}
> > +
> > +u8 mte_get_random_tag(void)
> > +{
> > +     u8 tag = 0xF;
> > +
> > +     if (system_supports_mte())
> > +             tag = mte_get_ptr_tag(mte_assign_random_ptr_tag(NULL));
>
> Another alternative inline asm with an IRG instruction.
>
> > +
> > +     return 0xF0 | tag;
> > +}
> > +
> > +void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> > +{
> > +     void *ptr = addr;
> > +
> > +     if ((!system_supports_mte()) || (size == 0))
> > +             return addr;
> > +
> > +     tag = 0xF0 | (tag & 0xF);
> > +     ptr = (void *)__tag_set(ptr, tag);
> > +     size = ALIGN(size, MTE_GRANULE_SIZE);
>
> I think aligning the size is dangerous. Can we instead turn it into a
> WARN_ON if not already aligned? At a quick look, the callers of
> kasan_{un,}poison_memory() already align the size.
>
> > +
> > +     mte_assign_mem_tag_range(ptr, size);
> > +
> > +     /*
> > +      * mte_assign_mem_tag_range() can be invoked in a multi-threaded
> > +      * context, ensure that tags are written in memory before the
> > +      * reference is used.
> > +      */
> > +     smp_wmb();
> > +
> > +     return ptr;
>
> I'm not sure I understand the barrier here. It ensures the relative
> ordering of memory (or tag) accesses on a CPU as observed by other CPUs.
> While the first access here is setting the tag, I can't see what other
> access on _this_ CPU it is ordered with.
>
> > +}
> > +
> >  static void update_sctlr_el1_tcf0(u64 tcf0)
> >  {
> >       /* ISB required for the kernel uaccess routines */
> > diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
> > index 03ca6d8b8670..8c743540e32c 100644
> > --- a/arch/arm64/lib/mte.S
> > +++ b/arch/arm64/lib/mte.S
> > @@ -149,3 +149,44 @@ SYM_FUNC_START(mte_restore_page_tags)
> >
> >       ret
> >  SYM_FUNC_END(mte_restore_page_tags)
> > +
> > +/*
> > + * Assign pointer tag based on the allocation tag
> > + *   x0 - source pointer
> > + * Returns:
> > + *   x0 - pointer with the correct tag to access memory
> > + */
> > +SYM_FUNC_START(mte_assign_valid_ptr_tag)
> > +     ldg     x0, [x0]
> > +     ret
> > +SYM_FUNC_END(mte_assign_valid_ptr_tag)
> > +
> > +/*
> > + * Assign random pointer tag
> > + *   x0 - source pointer
> > + * Returns:
> > + *   x0 - pointer with a random tag
> > + */
> > +SYM_FUNC_START(mte_assign_random_ptr_tag)
> > +     irg     x0, x0
> > +     ret
> > +SYM_FUNC_END(mte_assign_random_ptr_tag)
>
> As I said above, these two can be inline asm.
>
> > +
> > +/*
> > + * Assign allocation tags for a region of memory based on the pointer tag
> > + *   x0 - source pointer
> > + *   x1 - size
> > + *
> > + * Note: size is expected to be MTE_GRANULE_SIZE aligned
> > + */
> > +SYM_FUNC_START(mte_assign_mem_tag_range)
> > +     /* if (src == NULL) return; */
> > +     cbz     x0, 2f
> > +     /* if (size == 0) return; */
>
> You could skip the cbz here and just document that the size should be
> non-zero and aligned. The caller already takes care of this check.
>
> > +     cbz     x1, 2f
> > +1:   stg     x0, [x0]
> > +     add     x0, x0, #MTE_GRANULE_SIZE
> > +     sub     x1, x1, #MTE_GRANULE_SIZE
> > +     cbnz    x1, 1b
> > +2:   ret
> > +SYM_FUNC_END(mte_assign_mem_tag_range)
>
> --
> Catalin
>
Andrey Konovalov Sept. 8, 2020, 1:23 p.m. UTC | #6
On Thu, Aug 27, 2020 at 11:38 AM Catalin Marinas
<catalin.marinas@arm.com> wrote:
>
> On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
> > diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> > index 1c99fcadb58c..733be1cb5c95 100644
> > --- a/arch/arm64/include/asm/mte.h
> > +++ b/arch/arm64/include/asm/mte.h
> > @@ -5,14 +5,19 @@
> >  #ifndef __ASM_MTE_H
> >  #define __ASM_MTE_H
> >
> > -#define MTE_GRANULE_SIZE     UL(16)
> > +#include <asm/mte_asm.h>
>
> So the reason for this move is to include it in asm/cache.h. Fine by
> me but...
>
> >  #define MTE_GRANULE_MASK     (~(MTE_GRANULE_SIZE - 1))
> >  #define MTE_TAG_SHIFT                56
> >  #define MTE_TAG_SIZE         4
> > +#define MTE_TAG_MASK         GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> > +#define MTE_TAG_MAX          (MTE_TAG_MASK >> MTE_TAG_SHIFT)
>
> ... I'd rather move all these definitions in a file with a more
> meaningful name like mte-def.h. The _asm implies being meant for .S
> files inclusion which isn't the case.
>
> > diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> > index eb39504e390a..e2d708b4583d 100644
> > --- a/arch/arm64/kernel/mte.c
> > +++ b/arch/arm64/kernel/mte.c
> > @@ -72,6 +74,47 @@ int memcmp_pages(struct page *page1, struct page *page2)
> >       return ret;
> >  }
> >
> > +u8 mte_get_mem_tag(void *addr)
> > +{
> > +     if (system_supports_mte())
> > +             addr = mte_assign_valid_ptr_tag(addr);
>
> The mte_assign_valid_ptr_tag() is slightly misleading. All it does is
> read the allocation tag from memory.
>
> I also think this should be inline asm, possibly using alternatives.
> It's just an LDG instruction (and it saves us from having to invent a
> better function name).

Could you point me to an example of inline asm with alternatives, if there
are any? I see alternative_if and other similar macros used in the
arch/arm64/ code; is that what you mean? Those seem to always use static
conditions, like config values, but here we have a dynamic
system_supports_mte(). Could you elaborate on how I should implement this?
Catalin Marinas Sept. 8, 2020, 2:50 p.m. UTC | #7
On Tue, Sep 08, 2020 at 03:23:20PM +0200, Andrey Konovalov wrote:
> On Thu, Aug 27, 2020 at 11:38 AM Catalin Marinas
> <catalin.marinas@arm.com> wrote:
> > On Fri, Aug 14, 2020 at 07:27:02PM +0200, Andrey Konovalov wrote:
> > > diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> > > index 1c99fcadb58c..733be1cb5c95 100644
> > > --- a/arch/arm64/include/asm/mte.h
> > > +++ b/arch/arm64/include/asm/mte.h
> > > @@ -5,14 +5,19 @@
> > >  #ifndef __ASM_MTE_H
> > >  #define __ASM_MTE_H
> > >
> > > -#define MTE_GRANULE_SIZE     UL(16)
> > > +#include <asm/mte_asm.h>
> >
> > So the reason for this move is to include it in asm/cache.h. Fine by
> > me but...
> >
> > >  #define MTE_GRANULE_MASK     (~(MTE_GRANULE_SIZE - 1))
> > >  #define MTE_TAG_SHIFT                56
> > >  #define MTE_TAG_SIZE         4
> > > +#define MTE_TAG_MASK         GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> > > +#define MTE_TAG_MAX          (MTE_TAG_MASK >> MTE_TAG_SHIFT)
> >
> > ... I'd rather move all these definitions in a file with a more
> > meaningful name like mte-def.h. The _asm implies being meant for .S
> > files inclusion which isn't the case.
> >
> > > diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> > > index eb39504e390a..e2d708b4583d 100644
> > > --- a/arch/arm64/kernel/mte.c
> > > +++ b/arch/arm64/kernel/mte.c
> > > @@ -72,6 +74,47 @@ int memcmp_pages(struct page *page1, struct page *page2)
> > >       return ret;
> > >  }
> > >
> > > +u8 mte_get_mem_tag(void *addr)
> > > +{
> > > +     if (system_supports_mte())
> > > +             addr = mte_assign_valid_ptr_tag(addr);
> >
> > The mte_assign_valid_ptr_tag() is slightly misleading. All it does is
> > read the allocation tag from memory.
> >
> > I also think this should be inline asm, possibly using alternatives.
> > It's just an LDG instruction (and it saves us from having to invent a
> > better function name).
> 
> Could you point me to an example of inline asm with alternatives if
> there's any? I see alternative_if and other similar macros used in
> arch/arm64/ code, is that what you mean? Those seem to always use
> static conditions, like config values, but here we have a dynamic
> system_supports_mte(). Could you elaborate on how I should implement
> this?

There are plenty of ALTERNATIVE macro uses under arch/arm64, see
arch/arm64/include/asm/alternative.h for the definition and some simple
documentation.

In this case, something like (untested, haven't even checked whether it
matches the mte_assign_valid_ptr_tag() code):

	asm(ALTERNATIVE("orr %0, %1, #0xff << 56", "ldg %0, [%1]", ARM64_HAS_MTE));
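
For illustration, that one-liner expanded into a sketch of the whole helper
(equally untested; the capability name, the operand constraints and the
assumption that MTE instructions are allowed in inline asm here are all
unverified, and asm/alternative.h would need to be included):

	static inline u8 mte_get_mem_tag(void *addr)
	{
		/*
		 * Without MTE, fake the 0xFF "match-all" top byte; with MTE,
		 * LDG loads the allocation tag for addr into the tag bits of
		 * the register.
		 */
		asm volatile(ALTERNATIVE("orr %0, %0, #0xff << 56",
					 "ldg %0, [%0]",
					 ARM64_HAS_MTE)
			     : "+r" (addr));

		return 0xF0 | mte_get_ptr_tag(addr);
	}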

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..bc0dc66a6a27 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -103,6 +103,7 @@ 
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 1c99fcadb58c..733be1cb5c95 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -5,14 +5,19 @@ 
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
+#include <asm/mte_asm.h>
+
 #define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
 #define MTE_TAG_SHIFT		56
 #define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
@@ -45,7 +50,16 @@  long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void *mte_assign_valid_ptr_tag(void *ptr);
+void *mte_assign_random_ptr_tag(void *ptr);
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#define mte_get_ptr_tag(ptr)	((u8)(((u64)(ptr)) >> MTE_TAG_SHIFT))
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +94,33 @@  static inline int mte_ptrace_copy_tags(struct task_struct *child,
 	return -EIO;
 }
 
-#endif
+static inline void *mte_assign_valid_ptr_tag(void *ptr)
+{
+	return ptr;
+}
+static inline void *mte_assign_random_ptr_tag(void *ptr)
+{
+	return ptr;
+}
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#define mte_get_ptr_tag(ptr)	0xFF
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H  */
diff --git a/arch/arm64/include/asm/mte_asm.h b/arch/arm64/include/asm/mte_asm.h
new file mode 100644
index 000000000000..aa532c1851e1
--- /dev/null
+++ b/arch/arm64/include/asm/mte_asm.h
@@ -0,0 +1,10 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_ASM_H
+#define __ASM_MTE_ASM_H
+
+#define MTE_GRANULE_SIZE	UL(16)
+
+#endif /* __ASM_MTE_ASM_H  */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index eb39504e390a..e2d708b4583d 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -13,8 +13,10 @@ 
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 #include <asm/ptrace.h>
@@ -72,6 +74,47 @@  int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (system_supports_mte())
+		addr = mte_assign_valid_ptr_tag(addr);
+
+	return 0xF0 | mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	u8 tag = 0xF;
+
+	if (system_supports_mte())
+		tag = mte_get_ptr_tag(mte_assign_random_ptr_tag(NULL));
+
+	return 0xF0 | tag;
+}
+
+void * __must_check mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	tag = 0xF0 | (tag & 0xF);
+	ptr = (void *)__tag_set(ptr, tag);
+	size = ALIGN(size, MTE_GRANULE_SIZE);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	/*
+	 * mte_assign_mem_tag_range() can be invoked in a multi-threaded
+	 * context, ensure that tags are written in memory before the
+	 * reference is used.
+	 */
+	smp_wmb();
+
+	return ptr;
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 03ca6d8b8670..8c743540e32c 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,3 +149,44 @@  SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign pointer tag based on the allocation tag
+ *   x0 - source pointer
+ * Returns:
+ *   x0 - pointer with the correct tag to access memory
+ */
+SYM_FUNC_START(mte_assign_valid_ptr_tag)
+	ldg	x0, [x0]
+	ret
+SYM_FUNC_END(mte_assign_valid_ptr_tag)
+
+/*
+ * Assign random pointer tag
+ *   x0 - source pointer
+ * Returns:
+ *   x0 - pointer with a random tag
+ */
+SYM_FUNC_START(mte_assign_random_ptr_tag)
+	irg	x0, x0
+	ret
+SYM_FUNC_END(mte_assign_random_ptr_tag)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: size is expected to be MTE_GRANULE_SIZE aligned
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+	/* if (src == NULL) return; */
+	cbz	x0, 2f
+	/* if (size == 0) return; */
+	cbz	x1, 2f
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	sub	x1, x1, #MTE_GRANULE_SIZE
+	cbnz	x1, 1b
+2:	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)