
loongarch: Support RANDOMIZE_KSTACK_OFFSET

Message ID 20240718124739.3834489-1-ruanjinjie@huawei.com (mailing list archive)
State Superseded
Series loongarch: Support RANDOMIZE_KSTACK_OFFSET

Commit Message

Jinjie Ruan July 18, 2024, 12:47 p.m. UTC
Add support for kernel stack offset randomization while handling syscalls;
by default the offset is limited by KSTACK_OFFSET_MAX().

In order to avoid triggering stack canaries (due to __builtin_alloca) and
slowing down the entry path, use the __no_stack_protector attribute to
disable the stack protector for do_syscall() at the function level.

With this patch, the REPORT_STACK test shows that:
	`loongarch64 bits of stack entropy: 7`

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/loongarch/Kconfig          |  1 +
 arch/loongarch/kernel/syscall.c | 16 +++++++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)
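
For context, the helpers used above come from <linux/randomize_kstack.h>:
add_random_kstack_offset() consumes the previously banked per-CPU offset by
moving the stack pointer with an alloca-style allocation (which is why the
stack protector would otherwise emit a canary in do_syscall()), and
choose_random_kstack_offset() banks fresh entropy for the next syscall. A
simplified sketch of those generic helpers (paraphrased, with the static-key
enable check and the per-CPU variable declarations omitted):

	/* Limit the chosen offset to 10 bits (0..1023 bytes). */
	#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)

	/* Shift the stack down by the banked offset before dispatch. */
	#define add_random_kstack_offset() do {				\
		u32 offset = raw_cpu_read(kstack_offset);		\
		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep the allocation alive after "ptr" loses scope. */ \
		asm volatile("" :: "r"(ptr) : "memory");		\
	} while (0)

	/* Mix new entropy into the per-CPU offset for the next syscall. */
	#define choose_random_kstack_offset(rand) do {			\
		u32 offset = raw_cpu_read(kstack_offset);		\
		offset = ror32(offset, 5) ^ (rand);			\
		raw_cpu_write(kstack_offset, offset);			\
	} while (0)

Since the stack stays 16-byte aligned, the 1024 possible byte offsets
collapse to 1024 / 16 = 64 distinct stack positions, i.e. the 6 bits of
entropy in SP[9:4] described in the comment added below.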

Comments

Huacai Chen July 19, 2024, 1:22 a.m. UTC | #1
Hi, Jinjie,

Thank you for your patch, but I think it is better to use drdtime()
instead of get_random_u16(). drdtime() is similar to rdtsc() on x86 and
is defined in arch/loongarch/include/asm/loongarch.h.

Huacai

On Thu, Jul 18, 2024 at 8:42 PM Jinjie Ruan <ruanjinjie@huawei.com> wrote:
>
> Add support for kernel stack offset randomization while handling syscalls;
> by default the offset is limited by KSTACK_OFFSET_MAX().
>
> In order to avoid triggering stack canaries (due to __builtin_alloca) and
> slowing down the entry path, use the __no_stack_protector attribute to
> disable the stack protector for do_syscall() at the function level.
>
> With this patch, the REPORT_STACK test shows that:
>         `loongarch64 bits of stack entropy: 7`
>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> ---
>  arch/loongarch/Kconfig          |  1 +
>  arch/loongarch/kernel/syscall.c | 16 +++++++++++++++-
>  2 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index ddc042895d01..fcf6451b4e38 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -106,6 +106,7 @@ config LOONGARCH
>         select HAVE_ARCH_KFENCE
>         select HAVE_ARCH_KGDB if PERF_EVENTS
>         select HAVE_ARCH_MMAP_RND_BITS if MMU
> +       select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
>         select HAVE_ARCH_SECCOMP
>         select HAVE_ARCH_SECCOMP_FILTER
>         select HAVE_ARCH_TRACEHOOK
> diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
> index ec17cd5163b7..a332c6cb76ec 100644
> --- a/arch/loongarch/kernel/syscall.c
> +++ b/arch/loongarch/kernel/syscall.c
> @@ -9,6 +9,7 @@
>  #include <linux/entry-common.h>
>  #include <linux/errno.h>
>  #include <linux/linkage.h>
> +#include <linux/randomize_kstack.h>
>  #include <linux/syscalls.h>
>  #include <linux/unistd.h>
>
> @@ -39,7 +40,7 @@ void *sys_call_table[__NR_syscalls] = {
>  typedef long (*sys_call_fn)(unsigned long, unsigned long,
>         unsigned long, unsigned long, unsigned long, unsigned long);
>
> -void noinstr do_syscall(struct pt_regs *regs)
> +__no_stack_protector void noinstr do_syscall(struct pt_regs *regs)
>  {
>         unsigned long nr;
>         sys_call_fn syscall_fn;
> @@ -55,11 +56,24 @@ void noinstr do_syscall(struct pt_regs *regs)
>
>         nr = syscall_enter_from_user_mode(regs, nr);
>
> +       add_random_kstack_offset();
> +
>         if (nr < NR_syscalls) {
>                 syscall_fn = sys_call_table[nr];
>                 regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
>                                            regs->regs[7], regs->regs[8], regs->regs[9]);
>         }
>
> +       /*
> +        * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
> +        * bits. The actual entropy will be further reduced by the compiler
> +        * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
> +        * aligned, which will remove the 4 low bits from any entropy chosen
> +        * here.
> +        *
> +        * The resulting 6 bits of entropy is seen in SP[9:4].
> +        */
> +       choose_random_kstack_offset(get_random_u16());
> +
>         syscall_exit_to_user_mode(regs);
>  }
> --
> 2.34.1
>
>
Jinjie Ruan July 19, 2024, 2:05 a.m. UTC | #2
On 2024/7/19 9:22, Huacai Chen wrote:
> Hi, Jinjie,
> 
> Thank you for your patch, but I think it is better to use drdtime()
> instead of get_random_u16(). drdtime() is similar to rdtsc() on x86 and
> is defined in arch/loongarch/include/asm/loongarch.h.

Thank you, you are right.

"DRDTIME" can access independent stable counter of the processor core
according to "Loongson Processor 3C5000L Register Instruction Manual",
which is similar to "RDTSC" in x86.

I'll update it in the next version.
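
A minimal sketch of what that change could look like (hypothetical v2 hunk,
only the affected call shown; drdtime() is the existing helper in
arch/loongarch/include/asm/loongarch.h that reads the stable counter):

	/* Seed the next offset from the stable counter instead of the RNG. */
	choose_random_kstack_offset(drdtime());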

> 
> Huacai
> 
> On Thu, Jul 18, 2024 at 8:42 PM Jinjie Ruan <ruanjinjie@huawei.com> wrote:
>>
>> Add support for kernel stack offset randomization while handling syscalls;
>> by default the offset is limited by KSTACK_OFFSET_MAX().
>>
>> In order to avoid triggering stack canaries (due to __builtin_alloca) and
>> slowing down the entry path, use the __no_stack_protector attribute to
>> disable the stack protector for do_syscall() at the function level.
>>
>> With this patch, the REPORT_STACK test shows that:
>>         `loongarch64 bits of stack entropy: 7`
>>
>> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
>> ---
>>  arch/loongarch/Kconfig          |  1 +
>>  arch/loongarch/kernel/syscall.c | 16 +++++++++++++++-
>>  2 files changed, 16 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index ddc042895d01..fcf6451b4e38 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -106,6 +106,7 @@ config LOONGARCH
>>         select HAVE_ARCH_KFENCE
>>         select HAVE_ARCH_KGDB if PERF_EVENTS
>>         select HAVE_ARCH_MMAP_RND_BITS if MMU
>> +       select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
>>         select HAVE_ARCH_SECCOMP
>>         select HAVE_ARCH_SECCOMP_FILTER
>>         select HAVE_ARCH_TRACEHOOK
>> diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
>> index ec17cd5163b7..a332c6cb76ec 100644
>> --- a/arch/loongarch/kernel/syscall.c
>> +++ b/arch/loongarch/kernel/syscall.c
>> @@ -9,6 +9,7 @@
>>  #include <linux/entry-common.h>
>>  #include <linux/errno.h>
>>  #include <linux/linkage.h>
>> +#include <linux/randomize_kstack.h>
>>  #include <linux/syscalls.h>
>>  #include <linux/unistd.h>
>>
>> @@ -39,7 +40,7 @@ void *sys_call_table[__NR_syscalls] = {
>>  typedef long (*sys_call_fn)(unsigned long, unsigned long,
>>         unsigned long, unsigned long, unsigned long, unsigned long);
>>
>> -void noinstr do_syscall(struct pt_regs *regs)
>> +__no_stack_protector void noinstr do_syscall(struct pt_regs *regs)
>>  {
>>         unsigned long nr;
>>         sys_call_fn syscall_fn;
>> @@ -55,11 +56,24 @@ void noinstr do_syscall(struct pt_regs *regs)
>>
>>         nr = syscall_enter_from_user_mode(regs, nr);
>>
>> +       add_random_kstack_offset();
>> +
>>         if (nr < NR_syscalls) {
>>                 syscall_fn = sys_call_table[nr];
>>                 regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
>>                                            regs->regs[7], regs->regs[8], regs->regs[9]);
>>         }
>>
>> +       /*
>> +        * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
>> +        * bits. The actual entropy will be further reduced by the compiler
>> +        * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
>> +        * aligned, which will remove the 4 low bits from any entropy chosen
>> +        * here.
>> +        *
>> +        * The resulting 6 bits of entropy is seen in SP[9:4].
>> +        */
>> +       choose_random_kstack_offset(get_random_u16());
>> +
>>         syscall_exit_to_user_mode(regs);
>>  }
>> --
>> 2.34.1
>>
>>

Patch

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ddc042895d01..fcf6451b4e38 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -106,6 +106,7 @@  config LOONGARCH
 	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB if PERF_EVENTS
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ec17cd5163b7..a332c6cb76ec 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,6 +9,7 @@ 
 #include <linux/entry-common.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
+#include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 
@@ -39,7 +40,7 @@  void *sys_call_table[__NR_syscalls] = {
 typedef long (*sys_call_fn)(unsigned long, unsigned long,
 	unsigned long, unsigned long, unsigned long, unsigned long);
 
-void noinstr do_syscall(struct pt_regs *regs)
+__no_stack_protector void noinstr do_syscall(struct pt_regs *regs)
 {
 	unsigned long nr;
 	sys_call_fn syscall_fn;
@@ -55,11 +56,24 @@  void noinstr do_syscall(struct pt_regs *regs)
 
 	nr = syscall_enter_from_user_mode(regs, nr);
 
+	add_random_kstack_offset();
+
 	if (nr < NR_syscalls) {
 		syscall_fn = sys_call_table[nr];
 		regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
 					   regs->regs[7], regs->regs[8], regs->regs[9]);
 	}
 
+	/*
+	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+	 * bits. The actual entropy will be further reduced by the compiler
+	 * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
+	 * aligned, which will remove the 4 low bits from any entropy chosen
+	 * here.
+	 *
+	 * The resulting 6 bits of entropy is seen in SP[9:4].
+	 */
+	choose_random_kstack_offset(get_random_u16());
+
 	syscall_exit_to_user_mode(regs);
 }