@@ -8,6 +8,8 @@
#include <asm/types.h>
#include <asm/uaccess.h>
+struct kernel_clone_args;
+
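+/*
+ * GCSB DSYNC barrier, encoded as a raw HINT (#19) so the file still
+ * builds with assemblers that do not understand the GCS extension.
+ */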
static inline void gcsb_dsync(void)
{
asm volatile(".inst 0xd503227f" : : : "memory");
@@ -58,6 +60,8 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
void gcs_set_el0_mode(struct task_struct *task);
void gcs_free(struct task_struct *task);
void gcs_preserve_current_state(void);
+unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
+ const struct kernel_clone_args *args);
#else
@@ -69,6 +73,11 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
static inline void gcs_set_el0_mode(struct task_struct *task) { }
static inline void gcs_free(struct task_struct *task) { }
static inline void gcs_preserve_current_state(void) { }
+static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
+ const struct kernel_clone_args *args)
+{
+ return -ENOTSUPP;
+}
#endif
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
+#include <asm/gcs.h>
#include <asm/proc-fns.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
@@ -311,6 +312,14 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
return por_el0_allows_pkey(vma_pkey(vma), write, execute);
}
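+/*
+ * deactivate_mm() is called when the task's old mm is released during
+ * exec(); free the GCS that was allocated in the outgoing address
+ * space so the mapping does not leak into the new image.
+ */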
+#define deactivate_mm deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ gcs_free(tsk);
+}
+
#include <asm-generic/mmu_context.h>
#endif /* !__ASSEMBLY__ */
@@ -294,9 +294,35 @@ static void flush_gcs(void)
write_sysreg_s(0, SYS_GCSPR_EL0);
}
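+/*
+ * Set up GCS state for a new thread. The parent's pointers were
+ * duplicated by arch_dup_task_struct(), so clear them first: a failed
+ * allocation must not leave the child pointing at the parent's GCS,
+ * which the child's exit path would then free.
+ */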
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ unsigned long gcs;
+
+ if (!system_supports_gcs())
+ return 0;
+
+ p->thread.gcs_base = 0;
+ p->thread.gcs_size = 0;
+
+ gcs = gcs_alloc_thread_stack(p, args);
+ if (IS_ERR_VALUE(gcs))
+ return PTR_ERR((void *)gcs);
+
+ p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+ p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
+ return 0;
+}
+
#else
static void flush_gcs(void) { }
+static int copy_thread_gcs(struct task_struct *p,
+ const struct kernel_clone_args *args)
+{
+ return 0;
+}
#endif
@@ -313,6 +339,7 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
fpsimd_release_task(tsk);
+ gcs_free(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -376,6 +403,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
unsigned long stack_start = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
+ int ret;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
@@ -420,6 +448,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.uw.tp_value = tls;
p->thread.tpidr2_el0 = 0;
}
+
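+		/* Allocate a GCS for the new user thread where required. */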
+ ret = copy_thread_gcs(p, args);
+ if (ret != 0)
+ return ret;
} else {
/*
* A kthread has no context to ERET to, so ensure any buggy
@@ -5,9 +5,69 @@
#include <linux/syscalls.h>
#include <linux/types.h>
+#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
+#include <asm/gcs.h>
#include <asm/page.h>
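+/*
+ * Map a GCS: anonymous private memory, PROT_READ so that ordinary
+ * stores fault, with VM_SHADOW_STACK | VM_WRITE in the VMA so that
+ * only GCS operations may write it.
+ */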
+static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
+{
+ int flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ struct mm_struct *mm = current->mm;
+ unsigned long mapped_addr, unused;
+
+ if (addr)
+ flags |= MAP_FIXED_NOREPLACE;
+
+ mmap_write_lock(mm);
+ mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
+ VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
+ mmap_write_unlock(mm);
+
+ return mapped_addr;
+}
+
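+/* Page align an explicit size request, otherwise compute a default. */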
+static unsigned long gcs_size(unsigned long size)
+{
+ if (size)
+ return PAGE_ALIGN(size);
+
+ /* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
+ size = PAGE_ALIGN(min_t(unsigned long long,
+ rlimit(RLIMIT_STACK) / 2, SZ_2G));
+ return max(PAGE_SIZE, size);
+}
+
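+/*
+ * Allocate a GCS for a thread being created via clone(), returning
+ * the base address, 0 if no new GCS is needed, or an error value.
+ */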
+unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
+ const struct kernel_clone_args *args)
+{
+ unsigned long addr, size;
+
+ if (!system_supports_gcs())
+ return 0;
+
+ if (!task_gcs_el0_enabled(tsk))
+ return 0;
+
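+	/*
+	 * Only threads that share the address space without sharing
+	 * the stack (CLONE_VM and not CLONE_VFORK) get a new GCS;
+	 * fork() and vfork() children keep using the parent's, so just
+	 * record the current hardware GCS pointer for them.
+	 */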
+ if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
+ tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
+ return 0;
+ }
+
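+	/*
+	 * GCS entries are 8 bytes each, so half the requested stack
+	 * size is a generous bound on the call depth the normal stack
+	 * can support.
+	 */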
+ size = args->stack_size / 2;
+
+ size = gcs_size(size);
+ addr = alloc_gcs(0, size);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ tsk->thread.gcs_base = addr;
+ tsk->thread.gcs_size = size;
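+	/* The GCS grows towards lower addresses; start at the last slot. */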
+ tsk->thread.gcspr_el0 = addr + size - sizeof(u64);
+
+ return addr;
+}
+
/*
* Apply the GCS mode configured for the specified task to the
* hardware.
@@ -33,6 +93,15 @@ void gcs_free(struct task_struct *task)
if (!system_supports_gcs())
return;
+	/*
+	 * When fork() with CLONE_VM fails, the child (task) already
+	 * has a GCS allocated, and exit_thread() calls this function
+	 * to free it. In this case the parent (current) and the
+	 * child share the same mm struct.
+	 */
+ if (!task->mm || task->mm != current->mm)
+ return;
+
if (task->thread.gcs_base)
vm_munmap(task->thread.gcs_base, task->thread.gcs_size);