--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,6 +153,7 @@ struct thread_struct {
#endif
#ifdef CONFIG_ARM64_MTE
u64 sctlr_tcf0;
+ u64 gcr_incl;
#endif
};
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -981,6 +981,13 @@
write_sysreg(__scs_new, sysreg); \
} while (0)
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
+
#endif
#endif /* __ASM_SYSREG_H */
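For illustration, the clear/set-and-skip logic of sysreg_clear_set_s() can be modelled in plain C: read the current value, apply the clear/set masks and only write back when something actually changed, mirroring the "avoid expensive accesses if no change" pattern used for SCTLR_EL1 later in this series. A minimal user-space sketch with a plain u64 standing in for the system register (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_sysreg;            /* stand-in for the real register */

static void clear_set(uint64_t clear, uint64_t set)
{
        uint64_t old = fake_sysreg;
        uint64_t new = (old & ~clear) | set;

        if (new != old)                 /* skip the write if nothing changed */
                fake_sysreg = new;
}

int main(void)
{
        clear_set(0xf, 0x5);            /* 0x0 -> 0x5: written */
        clear_set(0xf, 0x5);            /* unchanged: write skipped */
        printf("reg = %#llx\n", (unsigned long long)fake_sysreg);
        return 0;
}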
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -71,6 +71,25 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
preempt_enable();
}
+static void update_gcr_el1_excl(u64 incl)
+{
+ u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+
+ /*
+ * Note that 'incl' is an include mask (controlled by the user via
+ * prctl()) while GCR_EL1 accepts an exclude mask.
+ * No need for an ISB here since this only affects EL0 for now;
+ * the synchronisation is implicit in the ERET to user space.
+ */
+ sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+}
+
+static void set_gcr_el1_excl(u64 incl)
+{
+ current->thread.gcr_incl = incl;
+ update_gcr_el1_excl(incl);
+}
+
void flush_mte_state(void)
{
if (!system_supports_mte())
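To make the include/exclude inversion concrete, here is a stand-alone sketch of the computation in update_gcr_el1_excl(), assuming the 0xffff value of SYS_GCR_EL1_EXCL_MASK used by this series (16 tags, one bit each; the constant is redefined here so the example compiles on its own):

#include <stdint.h>
#include <stdio.h>

#define SYS_GCR_EL1_EXCL_MASK   0xffffUL        /* bits 0-15, one per tag */

int main(void)
{
        uint64_t incl = (1UL << 1) | (1UL << 2);        /* allow tags 1 and 2 */
        uint64_t excl = ~incl & SYS_GCR_EL1_EXCL_MASK;  /* exclude the rest */

        printf("incl = %#06llx, excl = %#06llx\n",
               (unsigned long long)incl, (unsigned long long)excl);
        /* prints: incl = 0x0006, excl = 0xfff9 */
        return 0;
}

In particular, incl == 0 (the flush_mte_state() reset below) yields an all-ones exclude mask, matching the "by default all non-zero tags are excluded" behaviour described in the commit message.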
@@ -82,6 +101,8 @@ void flush_mte_state(void)
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ /* reset tag generation mask */
+ set_gcr_el1_excl(0);
}
void mte_thread_switch(struct task_struct *next)
@@ -92,6 +113,7 @@ void mte_thread_switch(struct task_struct *next)
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ update_gcr_el1_excl(next->thread.gcr_incl);
}
long set_mte_ctrl(unsigned long arg)
@@ -116,23 +138,30 @@ long set_mte_ctrl(unsigned long arg)
}
set_sctlr_el1_tcf0(tcf0);
+ set_gcr_el1_excl((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT);
return 0;
}
long get_mte_ctrl(void)
{
+ unsigned long ret;
+
if (!system_supports_mte())
return 0;
+ ret = current->thread.gcr_incl << PR_MTE_TAG_SHIFT;
+
switch (current->thread.sctlr_tcf0) {
case SCTLR_EL1_TCF0_NONE:
return PR_MTE_TCF_NONE;
case SCTLR_EL1_TCF0_SYNC:
- return PR_MTE_TCF_SYNC;
+ ret |= PR_MTE_TCF_SYNC;
+ break;
case SCTLR_EL1_TCF0_ASYNC:
- return PR_MTE_TCF_ASYNC;
+ ret |= PR_MTE_TCF_ASYNC;
+ break;
}
- return 0;
+ return ret;
}
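As a usage sketch, a thread would drive the new interface from user space roughly as follows. This assumes the usual glibc prctl() wrapper; the PR_* values are repeated from this series (guarded by #ifndef) so the example is self-contained:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SHIFT        1
#define PR_MTE_TCF_SYNC         (1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TAG_SHIFT        3
#endif

int main(void)
{
        /* Allow IRG/ADDG/SUBG to generate tags 1-15; keep tag 0 excluded. */
        unsigned long incl = 0xfffeUL;
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (incl << PR_MTE_TAG_SHIFT);

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
                perror("PR_SET_TAGGED_ADDR_CTRL");      /* e.g. no MTE */

        printf("readback: %#lx\n",
               (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
        return 0;
}

With synchronous tag checking selected, the readback from get_mte_ctrl() above is the TCF bits OR-ed with the stored include mask.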
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -601,7 +601,7 @@ long set_tagged_addr_ctrl(unsigned long arg)
return -EINVAL;
if (system_supports_mte())
- valid_mask |= PR_MTE_TCF_MASK;
+ valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
if (arg & ~valid_mask)
return -EINVAL;
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -239,6 +239,9 @@ struct prctl_mm_map {
# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
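With these additions, the first prctl() argument is laid out as follows (anything above bit 18 is still rejected by the valid_mask check in set_tagged_addr_ctrl()):

  bit  0      PR_TAGGED_ADDR_ENABLE
  bits 1-2    PR_MTE_TCF_*   tag check fault mode (NONE, SYNC or ASYNC)
  bits 3-18   PR_MTE_TAG_*   16-bit tag include mask; bit n permits tag n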
The IRG, ADDG and SUBG instructions insert a random tag in the resulting
address. Certain tags can be excluded via the GCR_EL1.Exclude bitmap
when, for example, the user wants a certain colour for freed buffers.
Since the GCR_EL1 register is not accessible at EL0, extend the
prctl(PR_SET_TAGGED_ADDR_CTRL) interface to include a 16-bit field in
the first argument for controlling which tags can be generated by the
above instructions (an include rather than exclude mask). Note that by
default all non-zero tags are excluded. This setting is per-thread.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
---

Notes:
    v2:
    - Switch from an exclude mask to an include one for the prctl()
      interface.
    - Reset the allowed tags mask during flush_thread().

 arch/arm64/include/asm/processor.h |  1 +
 arch/arm64/include/asm/sysreg.h    |  7 ++++++
 arch/arm64/kernel/mte.c            | 35 +++++++++++++++++++++++++++---
 arch/arm64/kernel/process.c        |  2 +-
 include/uapi/linux/prctl.h         |  3 +++
 5 files changed, 44 insertions(+), 4 deletions(-)