[v5,02/40] arm64: mte: Add in-kernel MTE helpers

Message ID 94dfda607f7f7a28a5df9ee68703922aa9a52a1e.1602535397.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series kasan: add hardware tag-based mode for arm64

Commit Message

Andrey Konovalov Oct. 12, 2020, 8:44 p.m. UTC
From: Vincenzo Frascino <vincenzo.frascino@arm.com>

Provide helper functions to manipulate allocation and pointer tags for
kernel addresses.

Low-level helper functions (mte_assign_*, written in assembly) operate
on tag values from the [0x0, 0xF] range. High-level helper functions
(mte_get/set_*) use the [0xF0, 0xFF] range to preserve compatibility
with normal kernel pointers that have 0xFF in their top byte.

MTE_GRANULE_SIZE and related definitions are moved to the mte-def.h
header, which doesn't have any dependencies and is safe to include in
any low-level header.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Co-developed-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
Change-Id: I1b5230254f90dc21a913447cb17f07fea7944ece
---
 arch/arm64/include/asm/esr.h       |  1 +
 arch/arm64/include/asm/mte-def.h   | 15 ++++++++
 arch/arm64/include/asm/mte-kasan.h | 56 ++++++++++++++++++++++++++++++
 arch/arm64/include/asm/mte.h       | 20 +++++++----
 arch/arm64/kernel/mte.c            | 48 +++++++++++++++++++++++++
 arch/arm64/lib/mte.S               | 16 +++++++++
 6 files changed, 150 insertions(+), 6 deletions(-)
 create mode 100644 arch/arm64/include/asm/mte-def.h
 create mode 100644 arch/arm64/include/asm/mte-kasan.h
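
To make the tag arithmetic concrete, here is a short illustrative C
sketch (a sketch only, not part of the patch) of how a KASAN tag in the
[0xF0, 0xFF] range relates to the 4-bit hardware tag held in pointer
bits 59:56; the macro values are restated from mte-def.h:

	/* Illustrative only: tag arithmetic behind the helpers.       */
	/* MTE_TAG_MASK = GENMASK(59, 56) = 0x0F00000000000000         */
	u64 ptr = 0xF8000000DEADBEE0UL;	/* top byte (KASAN tag) is 0xF8 */
	u8 hw_tag = (ptr & MTE_TAG_MASK) >> MTE_TAG_SHIFT;	/* 0x8, in [0x0, 0xF] */
	u8 kasan_tag = 0xF0 | hw_tag;	/* 0xF8, back in [0xF0, 0xFF] */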

Comments

Dmitry Vyukov Oct. 28, 2020, 11:28 a.m. UTC | #1
On Mon, Oct 12, 2020 at 10:44 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> From: Vincenzo Frascino <vincenzo.frascino@arm.com>
>
> Provide helper functions to manipulate allocation and pointer tags for
> kernel addresses.
>
> Low-level helper functions (mte_assign_*, written in assembly) operate
> on tag values from the [0x0, 0xF] range. High-level helper functions
> (mte_get/set_*) use the [0xF0, 0xFF] range to preserve compatibility
> with normal kernel pointers that have 0xFF in their top byte.
>
> MTE_GRANULE_SIZE and related definitions are moved to the mte-def.h
> header, which doesn't have any dependencies and is safe to include in
> any low-level header.
>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Co-developed-by: Andrey Konovalov <andreyknvl@google.com>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
> ---
> Change-Id: I1b5230254f90dc21a913447cb17f07fea7944ece
> ---
>  arch/arm64/include/asm/esr.h       |  1 +
>  arch/arm64/include/asm/mte-def.h   | 15 ++++++++
>  arch/arm64/include/asm/mte-kasan.h | 56 ++++++++++++++++++++++++++++++
>  arch/arm64/include/asm/mte.h       | 20 +++++++----
>  arch/arm64/kernel/mte.c            | 48 +++++++++++++++++++++++++
>  arch/arm64/lib/mte.S               | 16 +++++++++
>  6 files changed, 150 insertions(+), 6 deletions(-)
>  create mode 100644 arch/arm64/include/asm/mte-def.h
>  create mode 100644 arch/arm64/include/asm/mte-kasan.h
>
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..bc0dc66a6a27 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -103,6 +103,7 @@
>  #define ESR_ELx_FSC            (0x3F)
>  #define ESR_ELx_FSC_TYPE       (0x3C)
>  #define ESR_ELx_FSC_EXTABT     (0x10)
> +#define ESR_ELx_FSC_MTE                (0x11)
>  #define ESR_ELx_FSC_SERROR     (0x11)
>  #define ESR_ELx_FSC_ACCESS     (0x08)
>  #define ESR_ELx_FSC_FAULT      (0x04)
> diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
> new file mode 100644
> index 000000000000..8401ac5840c7
> --- /dev/null
> +++ b/arch/arm64/include/asm/mte-def.h
> @@ -0,0 +1,15 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020 ARM Ltd.
> + */
> +#ifndef __ASM_MTE_DEF_H
> +#define __ASM_MTE_DEF_H
> +
> +#define MTE_GRANULE_SIZE       UL(16)
> +#define MTE_GRANULE_MASK       (~(MTE_GRANULE_SIZE - 1))
> +#define MTE_TAG_SHIFT          56
> +#define MTE_TAG_SIZE           4
> +#define MTE_TAG_MASK           GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
> +#define MTE_TAG_MAX            (MTE_TAG_MASK >> MTE_TAG_SHIFT)
> +
> +#endif /* __ASM_MTE_DEF_H  */
> diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
> new file mode 100644
> index 000000000000..3a70fb1807fd
> --- /dev/null
> +++ b/arch/arm64/include/asm/mte-kasan.h
> @@ -0,0 +1,56 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020 ARM Ltd.
> + */
> +#ifndef __ASM_MTE_KASAN_H
> +#define __ASM_MTE_KASAN_H
> +
> +#include <asm/mte-def.h>
> +
> +#ifndef __ASSEMBLY__
> +
> +#include <linux/types.h>
> +
> +/*
> + * The functions below are meant to be used only for the
> + * KASAN_HW_TAGS interface defined in asm/memory.h.
> + */
> +#ifdef CONFIG_ARM64_MTE
> +
> +static inline u8 mte_get_ptr_tag(void *ptr)
> +{
> +       /* Note: The format of KASAN tags is 0xF<x> */
> +       u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
> +
> +       return tag;
> +}
> +
> +u8 mte_get_mem_tag(void *addr);
> +u8 mte_get_random_tag(void);
> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
> +
> +#else /* CONFIG_ARM64_MTE */
> +
> +static inline u8 mte_get_ptr_tag(void *ptr)
> +{
> +       return 0xFF;
> +}
> +
> +static inline u8 mte_get_mem_tag(void *addr)
> +{
> +       return 0xFF;
> +}
> +static inline u8 mte_get_random_tag(void)
> +{
> +       return 0xFF;
> +}
> +static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +       return addr;
> +}
> +
> +#endif /* CONFIG_ARM64_MTE */
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* __ASM_MTE_KASAN_H  */
> diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
> index 1c99fcadb58c..cf1cd181dcb2 100644
> --- a/arch/arm64/include/asm/mte.h
> +++ b/arch/arm64/include/asm/mte.h
> @@ -5,14 +5,16 @@
>  #ifndef __ASM_MTE_H
>  #define __ASM_MTE_H
>
> -#define MTE_GRANULE_SIZE       UL(16)
> -#define MTE_GRANULE_MASK       (~(MTE_GRANULE_SIZE - 1))
> -#define MTE_TAG_SHIFT          56
> -#define MTE_TAG_SIZE           4
> +#include <asm/compiler.h>
> +#include <asm/mte-def.h>
> +
> +#define __MTE_PREAMBLE         ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
>
>  #ifndef __ASSEMBLY__
>
> +#include <linux/bitfield.h>
>  #include <linux/page-flags.h>
> +#include <linux/types.h>
>
>  #include <asm/pgtable-types.h>
>
> @@ -45,7 +47,9 @@ long get_mte_ctrl(struct task_struct *task);
>  int mte_ptrace_copy_tags(struct task_struct *child, long request,
>                          unsigned long addr, unsigned long data);
>
> -#else
> +void mte_assign_mem_tag_range(void *addr, size_t size);
> +
> +#else /* CONFIG_ARM64_MTE */
>
>  /* unused if !CONFIG_ARM64_MTE, silence the compiler */
>  #define PG_mte_tagged  0
> @@ -80,7 +84,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
>         return -EIO;
>  }
>
> -#endif
> +static inline void mte_assign_mem_tag_range(void *addr, size_t size)
> +{
> +}
> +
> +#endif /* CONFIG_ARM64_MTE */
>
>  #endif /* __ASSEMBLY__ */
>  #endif /* __ASM_MTE_H  */
> diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
> index 52a0638ed967..8f99c65837fd 100644
> --- a/arch/arm64/kernel/mte.c
> +++ b/arch/arm64/kernel/mte.c
> @@ -13,10 +13,13 @@
>  #include <linux/swap.h>
>  #include <linux/swapops.h>
>  #include <linux/thread_info.h>
> +#include <linux/types.h>
>  #include <linux/uio.h>
>
> +#include <asm/barrier.h>
>  #include <asm/cpufeature.h>
>  #include <asm/mte.h>
> +#include <asm/mte-kasan.h>
>  #include <asm/ptrace.h>
>  #include <asm/sysreg.h>
>
> @@ -72,6 +75,51 @@ int memcmp_pages(struct page *page1, struct page *page2)
>         return ret;
>  }
>
> +u8 mte_get_mem_tag(void *addr)
> +{
> +       if (!system_supports_mte())
> +               return 0xFF;
> +
> +       asm(__MTE_PREAMBLE "ldg %0, [%0]"
> +           : "+r" (addr));
> +
> +       return mte_get_ptr_tag(addr);
> +}
> +
> +u8 mte_get_random_tag(void)
> +{
> +       void *addr;
> +
> +       if (!system_supports_mte())
> +               return 0xFF;
> +
> +       asm(__MTE_PREAMBLE "irg %0, %0"
> +           : "+r" (addr));
> +
> +       return mte_get_ptr_tag(addr);
> +}
> +
> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> +{
> +       void *ptr = addr;
> +
> +       if ((!system_supports_mte()) || (size == 0))
> +               return addr;
> +
> +       /* Make sure that size is MTE granule aligned. */
> +       WARN_ON(size & (MTE_GRANULE_SIZE - 1));
> +
> +       /* Make sure that the address is MTE granule aligned. */
> +       WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
> +
> +       tag = 0xF0 | tag;
> +       ptr = (void *)__tag_set(ptr, tag);
> +
> +       mte_assign_mem_tag_range(ptr, size);

This function will be called on production hot paths. I think it makes
sense to shave off some overheads here.

The additional debug checks may be useful, so maybe we need an
additional debug mode (debug of MTE/KASAN itself)?

Do we ever call this when !system_supports_mte()? I think we wanted to
have static_if's higher up the stack. Having additional checks
scattered across lower-level functions is overhead for every
malloc/free.
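
Something like this hypothetical sketch, using the kernel's static keys
(kasan_mte_key and the wrapper are made-up names; the key would be
enabled once at boot when MTE is detected):

	/* Hypothetical: gate the whole tagging path once, in the caller. */
	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(kasan_mte_key);	/* static_branch_enable() at boot */

	static inline void kasan_poison(void *addr, size_t size, u8 tag)
	{
		if (static_branch_likely(&kasan_mte_key))
			mte_set_mem_tag_range(addr, size, tag);	/* no per-call CPU check below */
	}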

Looking at how this is called from KASAN code.
KASAN code already ensures addr/size are properly aligned. I think we
should either remove the duplicate alignment checks, or do them only
in the additional debugging mode.
Does KASAN also ensure proper tag value (0xF0 mask)?

The KASAN wrapper is inlined in this patch:
https://linux-review.googlesource.com/c/linux/kernel/git/torvalds/linux/+/3699
but here we still have 2 non-inlined calls. The mte_assign_mem_tag_range
call is kinda inherent since it's in .S. But then I think this wrapper
should be inlinable.

Also, can we move mte_assign_mem_tag_range into inline asm in the
header? This would avoid register spills around the call in
malloc/free.

The asm code seems to do the rounding of the size up at no additional
cost (checks remaining size > 0, right?). I think it makes sense to
document that as the contract and remove the additional round_up(size,
KASAN_GRANULE_SIZE) in KASAN code.



> +       return ptr;
> +}
> +
>  static void update_sctlr_el1_tcf0(u64 tcf0)
>  {
>         /* ISB required for the kernel uaccess routines */
> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
> index 03ca6d8b8670..ede1ea65428c 100644
> --- a/arch/arm64/lib/mte.S
> +++ b/arch/arm64/lib/mte.S
> @@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags)
>
>         ret
>  SYM_FUNC_END(mte_restore_page_tags)
> +
> +/*
> + * Assign allocation tags for a region of memory based on the pointer tag
> + *   x0 - source pointer
> + *   x1 - size
> + *
> + * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
> + * size must be non-zero and MTE_GRANULE_SIZE aligned.
> + */
> +SYM_FUNC_START(mte_assign_mem_tag_range)
> +1:     stg     x0, [x0]
> +       add     x0, x0, #MTE_GRANULE_SIZE
> +       subs    x1, x1, #MTE_GRANULE_SIZE
> +       b.gt    1b
> +       ret
> +SYM_FUNC_END(mte_assign_mem_tag_range)
> --
> 2.28.0.1011.ga647a8990f-goog
>
Andrey Konovalov Oct. 29, 2020, 4:50 p.m. UTC | #2
On Wed, Oct 28, 2020 at 12:28 PM Dmitry Vyukov <dvyukov@google.com> wrote:
>

[...]

> > +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
> > +{
> > +       void *ptr = addr;
> > +
> > +       if ((!system_supports_mte()) || (size == 0))
> > +               return addr;
> > +
> > +       /* Make sure that size is MTE granule aligned. */
> > +       WARN_ON(size & (MTE_GRANULE_SIZE - 1));
> > +
> > +       /* Make sure that the address is MTE granule aligned. */
> > +       WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
> > +
> > +       tag = 0xF0 | tag;
> > +       ptr = (void *)__tag_set(ptr, tag);
> > +
> > +       mte_assign_mem_tag_range(ptr, size);
>
> This function will be called on production hot paths. I think it makes
> sense to shave off some overheads here.
>
> The additional debug checks may be useful, so maybe we need an
> additional debug mode (debug of MTE/KASAN itself)?
>
> Do we ever call this when !system_supports_mte()? I think we wanted to
> have static_if's higher up the stack. Having additional checks
> scattered across lower-level functions is overhead for every
> malloc/free.
>
> Looking at how this is called from KASAN code.
> KASAN code already ensures addr/size are properly aligned. I think we
> should either remove the duplicate alignment checks, or do them only
> in the additional debugging mode.
> Does KASAN also ensure proper tag value (0xF0 mask)?
>
> The KASAN wrapper is inlined in this patch:
> https://linux-review.googlesource.com/c/linux/kernel/git/torvalds/linux/+/3699
> but here we still have 2 non-inlined calls. The mte_assign_mem_tag_range
> call is kinda inherent since it's in .S. But then I think this wrapper
> should be inlinable.
>
> Also, can we move mte_assign_mem_tag_range into inline asm in the
> header? This would avoid register spills around the call in
> malloc/free.
>
> The asm code seems to do the rounding of the size up at no additional
> cost (checks remaining size > 0, right?). I think it makes sense to
> document that as the contract and remove the additional round_up(size,
> KASAN_GRANULE_SIZE) in KASAN code.

These are all valid concerns. It would be great to have an inline asm
mte_assign_mem_tag_range() implementation. We can also call it
directly from KASAN code without all these additional checks.
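
For instance (a hypothetical sketch, with hw_set_mem_tag_range a made-up
name, and alignment already guaranteed on the KASAN side):

	/* Hypothetical KASAN-side wrapper, no extra runtime checks. */
	static inline void hw_set_mem_tag_range(void *addr, size_t size, u8 tag)
	{
		mte_assign_mem_tag_range((void *)__tag_set(addr, 0xF0 | tag), size);
	}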

Perhaps it makes sense to include this change into the other series
that adds the production mode. And then squash if we decide to put
both changes into a single one.

Vincenzo, could you write a patch that adds an inline asm
mte_assign_mem_tag_range() implementation?
Vincenzo Frascino Nov. 4, 2020, 5:49 p.m. UTC | #3
Hi Andrey/Dmitry,

Sorry, I missed this one.

On 10/29/20 4:50 PM, Andrey Konovalov wrote:
> On Wed, Oct 28, 2020 at 12:28 PM Dmitry Vyukov <dvyukov@google.com> wrote:
>>
> 
> [...]
> 
>>> +void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
>>> +{
>>> +       void *ptr = addr;
>>> +
>>> +       if ((!system_supports_mte()) || (size == 0))
>>> +               return addr;
>>> +
>>> +       /* Make sure that size is MTE granule aligned. */
>>> +       WARN_ON(size & (MTE_GRANULE_SIZE - 1));
>>> +
>>> +       /* Make sure that the address is MTE granule aligned. */
>>> +       WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
>>> +
>>> +       tag = 0xF0 | tag;
>>> +       ptr = (void *)__tag_set(ptr, tag);
>>> +
>>> +       mte_assign_mem_tag_range(ptr, size);
>>
>> This function will be called on production hot paths. I think it makes
>> sense to shave off some overheads here.
>>
>> The additional debug checks may be useful, so maybe we need an
>> additional debug mode (debug of MTE/KASAN itself)?
>>
>> Do we ever call this when !system_supports_mte()? I think we wanted to
>> have static_if's higher up the stack. Having additional checks
>> scattered across lower-level functions is overhead for every
>> malloc/free.
>>
>> Looking at how this is called from KASAN code.
>> KASAN code already ensures addr/size are properly aligned. I think we
>> should either remove the duplicate alignment checks, or do them only
>> in the additional debugging mode.
>> Does KASAN also ensure proper tag value (0xF0 mask)?
>>
>> The KASAN wrapper is inlined in this patch:
>> https://linux-review.googlesource.com/c/linux/kernel/git/torvalds/linux/+/3699
>> but here we still have 2 non-inlined calls. The mte_assign_mem_tag_range
>> call is kinda inherent since it's in .S. But then I think this wrapper
>> should be inlinable.
>>
>> Also, can we move mte_assign_mem_tag_range into inline asm in the
>> header? This would avoid register spills around the call in
>> malloc/free.
>>
>> The asm code seems to do the rounding of the size up at no additional
>> cost (checks remaining size > 0, right?). I think it makes sense to
>> document that as the contract and remove the additional round_up(size,
>> KASAN_GRANULE_SIZE) in KASAN code.
> 
> These are all valid concerns. It would be great to have an inline asm
> mte_assign_mem_tag_range() implementation. We can also call it
> directly from KASAN code without all these additional checks.
> 
> Perhaps it makes sense to include this change into the other series
> that adds the production mode. And then squash if we decide to put
> both changes into a single one.
> 
> Vincenzo, could you write a patch that adds an inline asm
> mte_assign_mem_tag_range() implementation?
> 

As Andrey said, those are valid concerns. This function was originally
intended for the debugging version of KASAN, but since we are planning to
use it in production, the inline optimization sounds like a good approach.

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..bc0dc66a6a27 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -103,6 +103,7 @@ 
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
new file mode 100644
index 000000000000..8401ac5840c7
--- /dev/null
+++ b/arch/arm64/include/asm/mte-def.h
@@ -0,0 +1,15 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_DEF_H
+#define __ASM_MTE_DEF_H
+
+#define MTE_GRANULE_SIZE	UL(16)
+#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT		56
+#define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+#define MTE_TAG_MAX		(MTE_TAG_MASK >> MTE_TAG_SHIFT)
+
+#endif /* __ASM_MTE_DEF_H  */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
new file mode 100644
index 000000000000..3a70fb1807fd
--- /dev/null
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -0,0 +1,56 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_KASAN_H
+#define __ASM_MTE_KASAN_H
+
+#include <asm/mte-def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * The functions below are meant to be used only for the
+ * KASAN_HW_TAGS interface defined in asm/memory.h.
+ */
+#ifdef CONFIG_ARM64_MTE
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	/* Note: The format of KASAN tags is 0xF<x> */
+	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
+
+	return tag;
+}
+
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+#else /* CONFIG_ARM64_MTE */
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_MTE_KASAN_H  */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 1c99fcadb58c..cf1cd181dcb2 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -5,14 +5,16 @@ 
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
-#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
-#define MTE_TAG_SHIFT		56
-#define MTE_TAG_SIZE		4
+#include <asm/compiler.h>
+#include <asm/mte-def.h>
+
+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
@@ -45,7 +47,9 @@  long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +84,11 @@  static inline int mte_ptrace_copy_tags(struct task_struct *child,
 	return -EIO;
 }
 
-#endif
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H  */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 52a0638ed967..8f99c65837fd 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -13,10 +13,13 @@ 
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
+#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
@@ -72,6 +75,51 @@  int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	/* Make sure that size is MTE granule aligned. */
+	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+	/* Make sure that the address is MTE granule aligned. */
+	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
+
+	tag = 0xF0 | tag;
+	ptr = (void *)__tag_set(ptr, tag);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	return ptr;
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 03ca6d8b8670..ede1ea65428c 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,3 +149,19 @@  SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	subs	x1, x1, #MTE_GRANULE_SIZE
+	b.gt	1b
+	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)