[v3,24/39] arm64: mte: Add in-kernel MTE helpers

Message ID ae603463aed82bdff74942f23338a681b8ed8820.1600987622.git.andreyknvl@google.com
State New, archived
Series kasan: add hardware tag-based mode for arm64

Commit Message

Andrey Konovalov Sept. 24, 2020, 10:50 p.m. UTC
From: Vincenzo Frascino <vincenzo.frascino@arm.com>

Provide helper functions to manipulate allocation and pointer tags for
kernel addresses.

Low-level helper functions (mte_assign_*, written in assembly) operate
on tag values in the [0x0, 0xF] range. High-level helper functions
(mte_get/set_*) use the [0xF0, 0xFF] range to preserve compatibility
with normal kernel pointers that have 0xFF in their top byte.

MTE_GRANULE_SIZE and related definitions are moved to the mte-kasan.h
header, which doesn't have any dependencies and is safe to include into
any low-level header.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Co-developed-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
Change-Id: I1b5230254f90dc21a913447cb17f07fea7944ece
---
 arch/arm64/include/asm/esr.h       |  1 +
 arch/arm64/include/asm/mte-kasan.h | 60 ++++++++++++++++++++++++++++++
 arch/arm64/include/asm/mte.h       | 17 ++++++---
 arch/arm64/kernel/mte.c            | 44 ++++++++++++++++++++++
 arch/arm64/lib/mte.S               | 19 ++++++++++
 5 files changed, 135 insertions(+), 6 deletions(-)
 create mode 100644 arch/arm64/include/asm/mte-kasan.h
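
The mapping between the two tag ranges above can be pictured with a short
sketch (illustrative only, not part of the patch; the helper name is made up):

	/*
	 * A 4-bit hardware tag (0x0-0xF) lands in bits 59:56 of a pointer;
	 * since kernel pointers start out with 0xFF in the top byte, the
	 * resulting pointer tag falls in the [0xF0, 0xFF] range.
	 */
	static inline u8 hw_tag_to_ptr_tag(u8 hw_tag)
	{
		return 0xF0 | hw_tag;	/* e.g. 0x3 -> 0xF3 */
	}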

Comments

Catalin Marinas Sept. 25, 2020, 10:15 a.m. UTC | #1
On Fri, Sep 25, 2020 at 12:50:31AM +0200, Andrey Konovalov wrote:
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..bc0dc66a6a27 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -103,6 +103,7 @@
>  #define ESR_ELx_FSC		(0x3F)
>  #define ESR_ELx_FSC_TYPE	(0x3C)
>  #define ESR_ELx_FSC_EXTABT	(0x10)
> +#define ESR_ELx_FSC_MTE		(0x11)
>  #define ESR_ELx_FSC_SERROR	(0x11)
>  #define ESR_ELx_FSC_ACCESS	(0x08)
>  #define ESR_ELx_FSC_FAULT	(0x04)
> diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
> new file mode 100644
> index 000000000000..b0f27de8de33
> --- /dev/null
> +++ b/arch/arm64/include/asm/mte-kasan.h
> @@ -0,0 +1,60 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020 ARM Ltd.
> + */
> +#ifndef __ASM_MTE_ASM_H
> +#define __ASM_MTE_ASM_H
> +
> +#include <asm/compiler.h>
> +
> +#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"

Can this not live in mte.h?

> +#define MTE_GRANULE_SIZE	UL(16)
> +#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
> +#define MTE_TAG_SHIFT		56
> +#define MTE_TAG_SIZE		4
> +#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> +#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)

I'd still like these MTE_* macros in a separate mte-hwdef.h file. The
only reason I see they were not in mte.h is because they need to be
included in asm/cache.h. They are not KASAN specific.
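
A rough sketch of the split being suggested (file name as proposed above;
macro definitions copied from this patch, include list illustrative):

	/* arch/arm64/include/asm/mte-hwdef.h -- hypothetical */
	#ifndef __ASM_MTE_HWDEF_H
	#define __ASM_MTE_HWDEF_H

	#include <linux/bits.h>
	#include <linux/const.h>

	#define MTE_GRANULE_SIZE	UL(16)
	#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
	#define MTE_TAG_SHIFT		56
	#define MTE_TAG_SIZE		4
	#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
	#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)

	#endif /* __ASM_MTE_HWDEF_H */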

> +
> +#ifndef __ASSEMBLY__
> +
> +#include <linux/types.h>
> +
> +#ifdef CONFIG_ARM64_MTE
> +
> +static inline u8 mte_get_ptr_tag(void *ptr)
> +{
> +	u8 tag = (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
> +
> +	return tag;
> +}

So this returns the top 8 bits of the address (i.e. no masking with
MTE_TAG_MASK). Fine by me.

> +
> +u8 mte_get_mem_tag(void *addr);
> +u8 mte_get_random_tag(void);
> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
> +
> +#else /* CONFIG_ARM64_MTE */
> +
> +static inline u8 mte_get_ptr_tag(void *ptr)
> +{
> +	return 0xFF;
> +}
> +
> +static inline u8 mte_get_mem_tag(void *addr)
> +{
> +	return 0xFF;
> +}
> +static inline u8 mte_get_random_tag(void)
> +{
> +	return 0xFF;
> +}
> +static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +	return addr;
> +}

Maybe these can stay in mte-kasan.h, although they are not a direct
interface for KASAN AFAICT (the arch_* equivalents are defined in
asm/memory.h). If there's no good reason, we could move them to mte.h.

> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> index 1c99fcadb58c..3a2bf3ccb26c 100644
> --- a/arch/arm64/include/asm/mte.h
> +++ b/arch/arm64/include/asm/mte.h
> @@ -5,14 +5,13 @@
>  #ifndef __ASM_MTE_H
>  #define __ASM_MTE_H
>  
> -#define MTE_GRANULE_SIZE	UL(16)
> -#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
> -#define MTE_TAG_SHIFT		56
> -#define MTE_TAG_SIZE		4
> +#include <asm/mte-kasan.h>
>  
>  #ifndef __ASSEMBLY__
>  
> +#include <linux/bitfield.h>
>  #include <linux/page-flags.h>
> +#include <linux/types.h>
>  
>  #include <asm/pgtable-types.h>
>  
> @@ -45,7 +44,9 @@ long get_mte_ctrl(struct task_struct *task);
>  int mte_ptrace_copy_tags(struct task_struct *child, long request,
>  			 unsigned long addr, unsigned long data);
>  
> -#else
> +void mte_assign_mem_tag_range(void *addr, size_t size);

So mte_set_mem_tag_range() is KASAN specific but
mte_assign_mem_tag_range() is not. Slightly confusing.

> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index 52a0638ed967..833b63fdd5e2 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -13,8 +13,10 @@
>  #include <linux/swap.h>
>  #include <linux/swapops.h>
>  #include <linux/thread_info.h>
> +#include <linux/types.h>
>  #include <linux/uio.h>
>  
> +#include <asm/barrier.h>
>  #include <asm/cpufeature.h>
>  #include <asm/mte.h>
>  #include <asm/ptrace.h>
> @@ -72,6 +74,48 @@ int memcmp_pages(struct page *page1, struct page *page2)
>  	return ret;
>  }
>  
> +u8 mte_get_mem_tag(void *addr)
> +{
> +	if (!system_supports_mte())
> +		return 0xFF;
> +
> +	asm volatile(__MTE_PREAMBLE "ldg %0, [%0]"
> +		    : "+r" (addr));

Nitpick: do we need volatile, or would plain asm do?

I wonder whether we'd need the "memory" clobber. I don't see how this
would fail, though; maybe later on with stack tagging, if the compiler
writes tags behind our back.

> +
> +	return 0xF0 | mte_get_ptr_tag(addr);

Since mte_get_ptr_tag() returns the top byte of the address, we don't
need the additional 0xF0 or'ing. LDG only sets bits 59:56.
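
With both simplifications (plain asm, no 0xF0 or'ing) the function would
reduce to something like this sketch:

	u8 mte_get_mem_tag(void *addr)
	{
		if (!system_supports_mte())
			return 0xFF;

		/* LDG only updates bits 59:56; bits 63:60 of a kernel
		 * pointer are already 0xF, so no extra or'ing is needed. */
		asm(__MTE_PREAMBLE "ldg %0, [%0]"
		    : "+r" (addr));

		return mte_get_ptr_tag(addr);
	}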

> +}
> +
> +u8 mte_get_random_tag(void)
> +{
> +	void *addr;
> +
> +	if (!system_supports_mte())
> +		return 0xFF;
> +
> +	asm volatile(__MTE_PREAMBLE "irg %0, %0"
> +		    : "+r" (addr));
> +
> +	return 0xF0 | mte_get_ptr_tag(addr);

Same here.

> +}
> +
> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +	void *ptr = addr;
> +
> +	if ((!system_supports_mte()) || (size == 0))
> +		return addr;
> +
> +	/* Make sure that size is aligned. */
> +	WARN_ON(size & (MTE_GRANULE_SIZE - 1));

Doesn't the address need to be aligned as well?

> +
> +	tag = 0xF0 | tag;
> +	ptr = (void *)__tag_set(ptr, tag);
> +
> +	mte_assign_mem_tag_range(ptr, size);
> +
> +	return ptr;
> +}
> +
>  static void update_sctlr_el1_tcf0(u64 tcf0)
>  {
>  	/* ISB required for the kernel uaccess routines */
> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
> index 03ca6d8b8670..aa0ab01252fe 100644
> --- a/arch/arm64/lib/mte.S
> +++ b/arch/arm64/lib/mte.S
> @@ -149,3 +149,22 @@ SYM_FUNC_START(mte_restore_page_tags)
>  
>  	ret
>  SYM_FUNC_END(mte_restore_page_tags)
> +
> +/*
> + * Assign allocation tags for a region of memory based on the pointer tag
> + *   x0 - source pointer
> + *   x1 - size
> + *
> + * Note: size must be non-zero and MTE_GRANULE_SIZE aligned

Doesn't the address need to be aligned as well?

> + */
> +SYM_FUNC_START(mte_assign_mem_tag_range)
> +	/* if (src == NULL) return; */
> +	cbz	x0, 2f
> +	/* if (size == 0) return; */
> +	cbz	x1, 2f

I find these checks unnecessary, as I said a couple of times before;
just document the function pre-conditions. They are also incomplete
(i.e. you check for NULL but not alignment).

> +1:	stg	x0, [x0]
> +	add	x0, x0, #MTE_GRANULE_SIZE
> +	subs	x1, x1, #MTE_GRANULE_SIZE
> +	b.gt	1b
> +2:	ret
> +SYM_FUNC_END(mte_assign_mem_tag_range)
> -- 
> 2.28.0.681.g6f77f65b4e-goog

Vincenzo Frascino Sept. 25, 2020, 11:28 a.m. UTC | #2
On 9/25/20 11:15 AM, Catalin Marinas wrote:
> On Fri, Sep 25, 2020 at 12:50:31AM +0200, Andrey Konovalov wrote:
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..bc0dc66a6a27 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -103,6 +103,7 @@
>>  #define ESR_ELx_FSC		(0x3F)
>>  #define ESR_ELx_FSC_TYPE	(0x3C)
>>  #define ESR_ELx_FSC_EXTABT	(0x10)
>> +#define ESR_ELx_FSC_MTE		(0x11)
>>  #define ESR_ELx_FSC_SERROR	(0x11)
>>  #define ESR_ELx_FSC_ACCESS	(0x08)
>>  #define ESR_ELx_FSC_FAULT	(0x04)
>> diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
>> new file mode 100644
>> index 000000000000..b0f27de8de33
>> --- /dev/null
>> +++ b/arch/arm64/include/asm/mte-kasan.h
>> @@ -0,0 +1,60 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +/*
>> + * Copyright (C) 2020 ARM Ltd.
>> + */
>> +#ifndef __ASM_MTE_ASM_H
>> +#define __ASM_MTE_ASM_H
>> +
>> +#include <asm/compiler.h>
>> +
>> +#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
> 
> Can this not live in mte.h?
>

Yes, I can move it there in the next version.

>> +#define MTE_GRANULE_SIZE	UL(16)
>> +#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
>> +#define MTE_TAG_SHIFT		56
>> +#define MTE_TAG_SIZE		4
>> +#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
>> +#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
> 
> I'd still like these MTE_* macros in a separate mte-hwdef.h file. The
> only reason I see they were not in mte.h is because they need to be
> included in asm/cache.h. They are not KASAN specific.
> 

Ok, fine, I will reintroduce it in the next version.

>> +
>> +#ifndef __ASSEMBLY__
>> +
>> +#include <linux/types.h>
>> +
>> +#ifdef CONFIG_ARM64_MTE
>> +
>> +static inline u8 mte_get_ptr_tag(void *ptr)
>> +{
>> +	u8 tag = (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
>> +
>> +	return tag;
>> +}
> 
> So this returns the top 8 bits of the address (i.e. no masking with
> MTE_TAG_MASK). Fine by me.
> 
>> +
>> +u8 mte_get_mem_tag(void *addr);
>> +u8 mte_get_random_tag(void);
>> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
>> +
>> +#else /* CONFIG_ARM64_MTE */
>> +
>> +static inline u8 mte_get_ptr_tag(void *ptr)
>> +{
>> +	return 0xFF;
>> +}
>> +
>> +static inline u8 mte_get_mem_tag(void *addr)
>> +{
>> +	return 0xFF;
>> +}
>> +static inline u8 mte_get_random_tag(void)
>> +{
>> +	return 0xFF;
>> +}
>> +static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>> +{
>> +	return addr;
>> +}
> 
> Maybe these can stay in mte-kasan.h, although they are not a direct
> interface for KASAN AFAICT (the arch_* equivalents are defined in
> asm/memory.h). If there's no good reason, we could move them to mte.h.
>

This is here because it is not a direct interface, as you noticed. I tried to
keep the separation (even if I have something to fix based on your comment
below ;)).

The other KASAN implementations define the arch_* indirection in asm/memory.h
on every architecture. I think maintaining that design is the best way to not
create confusion.

>> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
>> index 1c99fcadb58c..3a2bf3ccb26c 100644
>> --- a/arch/arm64/include/asm/mte.h
>> +++ b/arch/arm64/include/asm/mte.h
>> @@ -5,14 +5,13 @@
>>  #ifndef __ASM_MTE_H
>>  #define __ASM_MTE_H
>>  
>> -#define MTE_GRANULE_SIZE	UL(16)
>> -#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
>> -#define MTE_TAG_SHIFT		56
>> -#define MTE_TAG_SIZE		4
>> +#include <asm/mte-kasan.h>
>>  
>>  #ifndef __ASSEMBLY__
>>  
>> +#include <linux/bitfield.h>
>>  #include <linux/page-flags.h>
>> +#include <linux/types.h>
>>  
>>  #include <asm/pgtable-types.h>
>>  
>> @@ -45,7 +44,9 @@ long get_mte_ctrl(struct task_struct *task);
>>  int mte_ptrace_copy_tags(struct task_struct *child, long request,
>>  			 unsigned long addr, unsigned long data);
>>  
>> -#else
>> +void mte_assign_mem_tag_range(void *addr, size_t size);
> 
> So mte_set_mem_tag_range() is KASAN specific but
> mte_assign_mem_tag_range() is not. Slightly confusing.
> 

mte_assign_mem_tag_range() is the internal function, implemented in assembly,
which is not used directly by KASAN. Is it the name that you find confusing?
Do you have a better proposal?

>> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
>> index 52a0638ed967..833b63fdd5e2 100644
>> --- a/arch/arm64/kernel/mte.c
>> +++ b/arch/arm64/kernel/mte.c
>> @@ -13,8 +13,10 @@
>>  #include <linux/swap.h>
>>  #include <linux/swapops.h>
>>  #include <linux/thread_info.h>
>> +#include <linux/types.h>
>>  #include <linux/uio.h>
>>  
>> +#include <asm/barrier.h>
>>  #include <asm/cpufeature.h>
>>  #include <asm/mte.h>
>>  #include <asm/ptrace.h>
>> @@ -72,6 +74,48 @@ int memcmp_pages(struct page *page1, struct page *page2)
>>  	return ret;
>>  }
>>  
>> +u8 mte_get_mem_tag(void *addr)
>> +{
>> +	if (!system_supports_mte())
>> +		return 0xFF;
>> +
>> +	asm volatile(__MTE_PREAMBLE "ldg %0, [%0]"
>> +		    : "+r" (addr));
> 
> Nitpick: do we need volatile, or would plain asm do?
> 

No, we clearly don't anymore :) I will remove it in the next iteration.

> I wonder whether we'd need the "memory" clobber. I don't see how this
> would fail, though; maybe later on with stack tagging, if the compiler
> writes tags behind our back.
> 

As you said, I do not see how this can fail either. We could be overcautious
here, though, and add a comment that the clobber has been added in
anticipation of stack tagging.
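
Something like this sketch, should the clobber be kept:

	/*
	 * "memory" clobber added defensively: with future stack tagging the
	 * compiler may write tags behind our back, and the clobber stops it
	 * from caching or reordering memory accesses across this asm.
	 */
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
	    : "+r" (addr)
	    :
	    : "memory");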

>> +
>> +	return 0xF0 | mte_get_ptr_tag(addr);
> 
> Since mte_get_ptr_tag() returns the top byte of the address, we don't
> need the additional 0xF0 or'ing. LDG only sets bits 59:56.
> 

Yes, this can clearly go away.

>> +}
>> +
>> +u8 mte_get_random_tag(void)
>> +{
>> +	void *addr;
>> +
>> +	if (!system_supports_mte())
>> +		return 0xFF;
>> +
>> +	asm volatile(__MTE_PREAMBLE "irg %0, %0"
>> +		    : "+r" (addr));
>> +
>> +	return 0xF0 | mte_get_ptr_tag(addr);
> 
> Same here.
> 

Agreed.

>> +}
>> +
>> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>> +{
>> +	void *ptr = addr;
>> +
>> +	if ((!system_supports_mte()) || (size == 0))
>> +		return addr;
>> +
>> +	/* Make sure that size is aligned. */
>> +	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
> 
> Doesn't the address need to be aligned as well?
> 

Yes, we need an extra WARN_ON here. I will add it in the next version.
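
Presumably a one-line addition along these lines (sketch):

	/* Make sure both the address and the size are granule-aligned. */
	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
	WARN_ON(size & (MTE_GRANULE_SIZE - 1));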

>> +
>> +	tag = 0xF0 | tag;
>> +	ptr = (void *)__tag_set(ptr, tag);
>> +
>> +	mte_assign_mem_tag_range(ptr, size);
>> +
>> +	return ptr;
>> +}
>> +
>>  static void update_sctlr_el1_tcf0(u64 tcf0)
>>  {
>>  	/* ISB required for the kernel uaccess routines */
>> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
>> index 03ca6d8b8670..aa0ab01252fe 100644
>> --- a/arch/arm64/lib/mte.S
>> +++ b/arch/arm64/lib/mte.S
>> @@ -149,3 +149,22 @@ SYM_FUNC_START(mte_restore_page_tags)
>>  
>>  	ret
>>  SYM_FUNC_END(mte_restore_page_tags)
>> +
>> +/*
>> + * Assign allocation tags for a region of memory based on the pointer tag
>> + *   x0 - source pointer
>> + *   x1 - size
>> + *
>> + * Note: size must be non-zero and MTE_GRANULE_SIZE aligned
> 
> Doesn't the address need to be aligned as well?
> 

The comment can be extended.

>> + */
>> +SYM_FUNC_START(mte_assign_mem_tag_range)
>> +	/* if (src == NULL) return; */
>> +	cbz	x0, 2f
>> +	/* if (size == 0) return; */
>> +	cbz	x1, 2f
> 
> I find these checks unnecessary, as I said a couple of times before;
> just document the function pre-conditions. They are also incomplete
> (i.e. you check for NULL but not alignment).
> 

I thought we agreed to harden the code further, based on [1]. Maybe I
misunderstood. I am going to remove them and extend the comment in the next version.

[1]
https://lore.kernel.org/linux-arm-kernel/921c4ed0-b5b5-bc01-5418-c52d80f1af59@arm.com/

>> +1:	stg	x0, [x0]
>> +	add	x0, x0, #MTE_GRANULE_SIZE
>> +	subs	x1, x1, #MTE_GRANULE_SIZE
>> +	b.gt	1b
>> +2:	ret
>> +SYM_FUNC_END(mte_assign_mem_tag_range)
>> -- 
>> 2.28.0.681.g6f77f65b4e-goog
>

Catalin Marinas Sept. 25, 2020, 12:50 p.m. UTC | #3
On Fri, Sep 25, 2020 at 12:28:24PM +0100, Vincenzo Frascino wrote:
> On 9/25/20 11:15 AM, Catalin Marinas wrote:
> > On Fri, Sep 25, 2020 at 12:50:31AM +0200, Andrey Konovalov wrote:
> >> +u8 mte_get_mem_tag(void *addr);
> >> +u8 mte_get_random_tag(void);
> >> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
> >> +
> >> +#else /* CONFIG_ARM64_MTE */
> >> +
> >> +static inline u8 mte_get_ptr_tag(void *ptr)
> >> +{
> >> +	return 0xFF;
> >> +}
> >> +
> >> +static inline u8 mte_get_mem_tag(void *addr)
> >> +{
> >> +	return 0xFF;
> >> +}
> >> +static inline u8 mte_get_random_tag(void)
> >> +{
> >> +	return 0xFF;
> >> +}
> >> +static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> >> +{
> >> +	return addr;
> >> +}
> > 
> > Maybe these can stay in mte-kasan.h, although they are not a direct
> > interface for KASAN AFAICT (the arch_* equivalents are defined in
> > asm/memory.h). If there's no good reason, we could move them to mte.h.
> 
> This is here because it is not a direct interface, as you noticed. I tried to
> keep the separation (even if I have something to fix based on your comment
> below ;)).
> 
> The other KASAN implementations define the arch_* indirection in asm/memory.h
> on every architecture. I think maintaining that design is the best way to not
> create confusion.

I'm ok with asm/memory.h for kasan, no need to change that. You can also
keep these functions in asm/mte-kasan.h but add a comment that they are
only for the kasan interface defined in asm/memory.h.
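
For example (wording illustrative):

	/*
	 * These functions are meant to be used only via the KASAN arch_*
	 * interface defined in asm/memory.h, not called directly.
	 */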

> >> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> >> index 1c99fcadb58c..3a2bf3ccb26c 100644
> >> --- a/arch/arm64/include/asm/mte.h
> >> +++ b/arch/arm64/include/asm/mte.h
> >> @@ -5,14 +5,13 @@
> >>  #ifndef __ASM_MTE_H
> >>  #define __ASM_MTE_H
> >>  
> >> -#define MTE_GRANULE_SIZE	UL(16)
> >> -#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
> >> -#define MTE_TAG_SHIFT		56
> >> -#define MTE_TAG_SIZE		4
> >> +#include <asm/mte-kasan.h>

And this include should be replaced by asm/mte-hwdef.h.

> >>  #ifndef __ASSEMBLY__
> >>  
> >> +#include <linux/bitfield.h>
> >>  #include <linux/page-flags.h>
> >> +#include <linux/types.h>
> >>  
> >>  #include <asm/pgtable-types.h>
> >>  
> >> @@ -45,7 +44,9 @@ long get_mte_ctrl(struct task_struct *task);
> >>  int mte_ptrace_copy_tags(struct task_struct *child, long request,
> >>  			 unsigned long addr, unsigned long data);
> >>  
> >> -#else
> >> +void mte_assign_mem_tag_range(void *addr, size_t size);
> > 
> > So mte_set_mem_tag_range() is KASAN specific but
> > mte_assign_mem_tag_range() is not. Slightly confusing.
> 
> mte_assign_mem_tag_range() is the internal function, implemented in assembly,
> which is not used directly by KASAN. Is it the name that you find confusing?
> Do you have a better proposal?

I don't mind the name, just trying to find some consistency in the
headers.

> >> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> >> index 52a0638ed967..833b63fdd5e2 100644
> >> --- a/arch/arm64/kernel/mte.c
> >> +++ b/arch/arm64/kernel/mte.c
> >> @@ -13,8 +13,10 @@
> >>  #include <linux/swap.h>
> >>  #include <linux/swapops.h>
> >>  #include <linux/thread_info.h>
> >> +#include <linux/types.h>
> >>  #include <linux/uio.h>
> >>  
> >> +#include <asm/barrier.h>
> >>  #include <asm/cpufeature.h>
> >>  #include <asm/mte.h>
> >>  #include <asm/ptrace.h>
> >> @@ -72,6 +74,48 @@ int memcmp_pages(struct page *page1, struct page *page2)
> >>  	return ret;
> >>  }
> >>  
> >> +u8 mte_get_mem_tag(void *addr)
> >> +{
> >> +	if (!system_supports_mte())
> >> +		return 0xFF;
> >> +
> >> +	asm volatile(__MTE_PREAMBLE "ldg %0, [%0]"
> >> +		    : "+r" (addr));
[...]
> > I wonder whether we'd need the "memory" clobber. I don't see how this
> > would fail, though; maybe later on with stack tagging, if the compiler
> > writes tags behind our back.
> > 
> 
> As you said, I do not see how this can fail either. We could be overcautious
> here, though, and add a comment that the clobber has been added in
> anticipation of stack tagging.

I don't think we should bother, it may not even matter.

> >> + */
> >> +SYM_FUNC_START(mte_assign_mem_tag_range)
> >> +	/* if (src == NULL) return; */
> >> +	cbz	x0, 2f
> >> +	/* if (size == 0) return; */
> >> +	cbz	x1, 2f
> > 
> > I find these checks unnecessary, as I said a couple of times before;
> > just document the function pre-conditions. They are also incomplete
> > (i.e. you check for NULL but not alignment).
> > 
> 
> I thought we agreed to harden the code further, based on [1]. Maybe I
> misunderstood. I am going to remove them and extend the comment in the next version.
> 
> [1]
> https://lore.kernel.org/linux-arm-kernel/921c4ed0-b5b5-bc01-5418-c52d80f1af59@arm.com/

Well, you concluded that but I haven't confirmed ;). Since it's called
from a single place which does the checks already, I don't see the point
in duplicating them. Documenting should be sufficient.

Vincenzo Frascino Sept. 25, 2020, 1:36 p.m. UTC | #4
On 9/25/20 1:50 PM, Catalin Marinas wrote:
>>>> + */
>>>> +SYM_FUNC_START(mte_assign_mem_tag_range)
>>>> +	/* if (src == NULL) return; */
>>>> +	cbz	x0, 2f
>>>> +	/* if (size == 0) return; */
>>>> +	cbz	x1, 2f
>>> I find these checks unnecessary, as I said a couple of times before;
>>> just document the function pre-conditions. They are also incomplete
>>> (i.e. you check for NULL but not alignment).
>>>
>> I thought we agreed to harden the code further, based on [1]. Maybe I
>> misunderstood. I am going to remove them and extend the comment in the next version.
>>
>> [1]
>> https://lore.kernel.org/linux-arm-kernel/921c4ed0-b5b5-bc01-5418-c52d80f1af59@arm.com/
> Well, you concluded that but I haven't confirmed ;). Since it's called
> from a single place which does the checks already, I don't see the point
> in duplicating them. Documenting should be sufficient.

Have you ever heard of "tacit consent"? ;) Anyway, fine by me, I will add a
comment here.
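
The extended function header might then read (sketch; exact wording to be
settled in the next version):

	/*
	 * Assign allocation tags for a region of memory based on the pointer tag.
	 *   x0 - source pointer, must be MTE_GRANULE_SIZE aligned
	 *   x1 - size, must be non-zero and MTE_GRANULE_SIZE aligned
	 *
	 * The caller is expected to enforce these preconditions; no checks
	 * are performed here.
	 */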

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..bc0dc66a6a27 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -103,6 +103,7 @@ 
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
new file mode 100644
index 000000000000..b0f27de8de33
--- /dev/null
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -0,0 +1,60 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_ASM_H
+#define __ASM_MTE_ASM_H
+
+#include <asm/compiler.h>
+
+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
+#define MTE_GRANULE_SIZE	UL(16)
+#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT		56
+#define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM64_MTE
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	u8 tag = (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
+
+	return tag;
+}
+
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+#else /* CONFIG_ARM64_MTE */
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_MTE_ASM_H  */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 1c99fcadb58c..3a2bf3ccb26c 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -5,14 +5,13 @@ 
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
-#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
-#define MTE_TAG_SHIFT		56
-#define MTE_TAG_SIZE		4
+#include <asm/mte-kasan.h>
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
@@ -45,7 +44,9 @@  long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +81,11 @@  static inline int mte_ptrace_copy_tags(struct task_struct *child,
 	return -EIO;
 }
 
-#endif
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H  */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 52a0638ed967..833b63fdd5e2 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -13,8 +13,10 @@ 
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 #include <asm/ptrace.h>
@@ -72,6 +74,48 @@  int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm volatile(__MTE_PREAMBLE "ldg %0, [%0]"
+		    : "+r" (addr));
+
+	return 0xF0 | mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm volatile(__MTE_PREAMBLE "irg %0, %0"
+		    : "+r" (addr));
+
+	return 0xF0 | mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	/* Make sure that size is aligned. */
+	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+	tag = 0xF0 | tag;
+	ptr = (void *)__tag_set(ptr, tag);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	return ptr;
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 03ca6d8b8670..aa0ab01252fe 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,3 +149,22 @@  SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: size must be non-zero and MTE_GRANULE_SIZE aligned
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+	/* if (src == NULL) return; */
+	cbz	x0, 2f
+	/* if (size == 0) return; */
+	cbz	x1, 2f
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	subs	x1, x1, #MTE_GRANULE_SIZE
+	b.gt	1b
+2:	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)