Message ID | 20231004151405.521596-3-cleger@rivosinc.com (mailing list archive)
---|---
State | Accepted
Commit | 7c83232161f609bbc452a1255f823f41afc411dd
Series | Add support to handle misaligned accesses in S-mode
Clément Léger <cleger@rivosinc.com> writes:

> Misalignment trap handling is only supported for M-mode and uses direct
> accesses to user memory. In S-mode, when handling usermode fault, this
> requires to use the get_user()/put_user() accessors. Implement
> load_u8(), store_u8() and get_insn() using these accessors for
> userspace and direct text access for kernel.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>

[... full diff trimmed; see the patch below ...]

> +#else
> +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
> +{
> +	if (user_mode(regs)) {
> +		return __get_user(*r_val, addr);
> +	} else {
> +		*r_val = *addr;
> +		return 0;
> +	}

One nit (...well two) ;-)

If you're respinning I'd get rid of the "inlines", and personally I
think early exit is easier to read. Applies to the whole patch.

| {
|	if (user_mode(regs))
|		return __get_user(*r_val, addr);
|
|	*r_val = *addr;
|	return 0;
| }

Regardless if you change or not,

Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
On 04/10/2023 19:00, Björn Töpel wrote:
> Clément Léger <cleger@rivosinc.com> writes:
>
>> Misalignment trap handling is only supported for M-mode and uses direct
>> accesses to user memory. In S-mode, when handling usermode fault, this
>> requires to use the get_user()/put_user() accessors. Implement
>> load_u8(), store_u8() and get_insn() using these accessors for
>> userspace and direct text access for kernel.
>>
>> Signed-off-by: Clément Léger <cleger@rivosinc.com>

[...]

>> +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
>> +{
>> +	if (user_mode(regs)) {
>> +		return __get_user(*r_val, addr);
>> +	} else {
>> +		*r_val = *addr;
>> +		return 0;
>> +	}
>
> One nit (...well two) ;-)
>
> If you're respinning I'd get rid of the "inlines", and personally I
> think early exit is easier to read. Applies to the whole patch.

Noted, I'll fix that (checkpatch suggested it so that makes sense).

Clément

> | {
> |	if (user_mode(regs))
> |		return __get_user(*r_val, addr);
> |
> |	*r_val = *addr;
> |	return 0;
> | }
>
> Regardless if you change or not,
>
> Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
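[Editor's note: for reference, the early-exit shape Björn suggests, applied to the S-mode store_u8() helper from this patch. This is a sketch of the suggested style only, not code from the posted series.]

/* Early-exit variant of the S-mode store_u8() from the diff below;
 * illustrative sketch of the suggested style, not the posted code. */
static int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	if (user_mode(regs))
		return __put_user(val, addr);

	*addr = val;
	return 0;
}

The early return keeps the user-mode special case out of the way and drops one level of nesting, and removing the explicit "inline" leaves the decision to the compiler, which is the same point checkpatch raises.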
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d607ab0f7c6d..6e167358a897 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -636,6 +636,14 @@ config THREAD_SIZE_ORDER
 	  Specify the Pages of thread stack size (from 4KB to 64KB), which also
 	  affects irq stack size, which is equal to thread stack size.
 
+config RISCV_MISALIGNED
+	bool "Support misaligned load/store traps for kernel and userspace"
+	default y
+	help
+	  Say Y here if you want the kernel to embed support for misaligned
+	  load/store for both kernel and userspace. When disable, misaligned
+	  accesses will generate SIGBUS in userspace and panic in kernel.
+
 endmenu # "Platform type"
 
 menu "Kernel features"
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 6e4dee49d84b..7ab5e34318c8 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -8,4 +8,18 @@
 void handle_page_fault(struct pt_regs *regs);
 void handle_break(struct pt_regs *regs);
 
+#ifdef CONFIG_RISCV_MISALIGNED
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+#else
+static inline int handle_misaligned_load(struct pt_regs *regs)
+{
+	return -1;
+}
+static inline int handle_misaligned_store(struct pt_regs *regs)
+{
+	return -1;
+}
+#endif
+
 #endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 95cf25d48405..0d874fb24b51 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -59,7 +59,7 @@ obj-y	+= patch.o
 obj-y	+= probes/
 obj-$(CONFIG_MMU) += vdso.o vdso/
 
-obj-$(CONFIG_RISCV_M_MODE)	+= traps_misaligned.o
+obj-$(CONFIG_RISCV_MISALIGNED)	+= traps_misaligned.o
 obj-$(CONFIG_FPU)		+= fpu.o
 obj-$(CONFIG_RISCV_ISA_V)	+= vector.o
 obj-$(CONFIG_SMP)		+= smpboot.o
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 19807c4d3805..d69779e4b967 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -179,14 +179,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
 
 DO_ERROR_INFO(do_trap_load_fault,
 	SIGSEGV, SEGV_ACCERR, "load access fault");
-#ifndef CONFIG_RISCV_M_MODE
-DO_ERROR_INFO(do_trap_load_misaligned,
-	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
-DO_ERROR_INFO(do_trap_store_misaligned,
-	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
-#else
-int handle_misaligned_load(struct pt_regs *regs);
-int handle_misaligned_store(struct pt_regs *regs);
 
 asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
 {
@@ -229,7 +221,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
 		irqentry_nmi_exit(regs, state);
 	}
 }
-#endif
 DO_ERROR_INFO(do_trap_store_fault,
 	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
 DO_ERROR_INFO(do_trap_ecall_s,
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e7bfb33089c1..9daed7d756ae 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -12,6 +12,7 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
+#include <asm/entry-common.h>
 
 #define INSN_MATCH_LB		0x3
 #define INSN_MASK_LB		0x707f
@@ -151,21 +152,25 @@
 #define PRECISION_S 0
 #define PRECISION_D 1
 
-static inline u8 load_u8(const u8 *addr)
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
 {
 	u8 val;
 
 	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+	*r_val = val;
 
-	return val;
+	return 0;
 }
 
-static inline void store_u8(u8 *addr, u8 val)
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
 {
 	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+	return 0;
 }
 
-static inline ulong get_insn(ulong mepc)
+static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
 {
 	register ulong __mepc asm ("a2") = mepc;
 	ulong val, rvc_mask = 3, tmp;
@@ -194,9 +199,87 @@ static inline ulong get_insn(ulong mepc)
 	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
 	  [xlen_minus_16] "i" (XLEN_MINUS_16));
 
-	return val;
+	*r_insn = val;
+
+	return 0;
+}
+#else
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+	if (user_mode(regs)) {
+		return __get_user(*r_val, addr);
+	} else {
+		*r_val = *addr;
+		return 0;
+	}
 }
 
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+	if (user_mode(regs)) {
+		return __put_user(val, addr);
+	} else {
+		*addr = val;
+		return 0;
+	}
+}
+
+#define __read_insn(regs, insn, insn_addr)		\
+({							\
+	int __ret;					\
+							\
+	if (user_mode(regs)) {				\
+		__ret = __get_user(insn, insn_addr);	\
+	} else {					\
+		insn = *insn_addr;			\
+		__ret = 0;				\
+	}						\
+							\
+	__ret;						\
+})
+
+static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
+{
+	ulong insn = 0;
+
+	if (epc & 0x2) {
+		ulong tmp = 0;
+		u16 __user *insn_addr = (u16 __user *)epc;
+
+		if (__read_insn(regs, insn, insn_addr))
+			return -EFAULT;
+		/* __get_user() uses regular "lw" which sign extend the loaded
+		 * value make sure to clear higher order bits in case we "or" it
+		 * below with the upper 16 bits half.
+		 */
+		insn &= GENMASK(15, 0);
+		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+			*r_insn = insn;
+			return 0;
+		}
+		insn_addr++;
+		if (__read_insn(regs, tmp, insn_addr))
+			return -EFAULT;
+		*r_insn = (tmp << 16) | insn;
+
+		return 0;
+	} else {
+		u32 __user *insn_addr = (u32 __user *)epc;
+
+		if (__read_insn(regs, insn, insn_addr))
+			return -EFAULT;
+		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+			*r_insn = insn;
+			return 0;
+		}
+		insn &= GENMASK(15, 0);
+		*r_insn = insn;
+
+		return 0;
+	}
+}
+#endif
+
 union reg_data {
 	u8 data_bytes[8];
 	ulong data_ulong;
@@ -207,10 +290,13 @@ int handle_misaligned_load(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn = get_insn(epc);
-	unsigned long addr = csr_read(mtval);
+	unsigned long insn;
+	unsigned long addr = regs->badaddr;
 	int i, fp = 0, shift = 0, len = 0;
 
+	if (get_insn(regs, epc, &insn))
+		return -1;
+
 	regs->epc = 0;
 
 	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -274,8 +360,10 @@ int handle_misaligned_load(struct pt_regs *regs)
 	}
 
 	val.data_u64 = 0;
-	for (i = 0; i < len; i++)
-		val.data_bytes[i] = load_u8((void *)(addr + i));
+	for (i = 0; i < len; i++) {
+		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+			return -1;
+	}
 
 	if (fp)
 		return -1;
@@ -290,10 +378,13 @@ int handle_misaligned_store(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn = get_insn(epc);
-	unsigned long addr = csr_read(mtval);
+	unsigned long insn;
+	unsigned long addr = regs->badaddr;
 	int i, len = 0;
 
+	if (get_insn(regs, epc, &insn))
+		return -1;
+
 	regs->epc = 0;
 
 	val.data_ulong = GET_RS2(insn, regs);
@@ -327,8 +418,10 @@ int handle_misaligned_store(struct pt_regs *regs)
 		return -1;
 	}
 
-	for (i = 0; i < len; i++)
-		store_u8((void *)(addr + i), val.data_bytes[i]);
+	for (i = 0; i < len; i++) {
+		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+			return -1;
+	}
 
 	regs->epc = epc + INSN_LEN(insn);
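[Editor's note: on the S-mode get_insn() above — with the C extension, instructions are only 16-bit aligned, so a 32-bit instruction whose address has bit 1 set (epc & 0x2) straddles a 32-bit boundary and may even cross a page. That is why the code fetches one 16-bit parcel at a time, fault-checking each fetch, and decides the full length from the low bits of the first parcel. A standalone sketch of that length test (hypothetical helper, not from the patch):]

#include <stdint.h>
#include <stdbool.h>

/* Standard RISC-V length encoding, mirrored by the kernel's
 * __INSN_LENGTH_MASK/__INSN_LENGTH_32 test: low two bits 0b11 mean a
 * 32-bit instruction, anything else a 16-bit compressed (RVC) one.
 * Hypothetical standalone helper, not from the patch. */
static bool insn_is_32bit(uint16_t first_parcel)
{
	return (first_parcel & 0x3) == 0x3;
}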
Misalignment trap handling is only supported for M-mode and uses direct
accesses to user memory. In S-mode, when handling usermode fault, this
requires to use the get_user()/put_user() accessors. Implement
load_u8(), store_u8() and get_insn() using these accessors for
userspace and direct text access for kernel.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
 arch/riscv/Kconfig                    |   8 ++
 arch/riscv/include/asm/entry-common.h |  14 +++
 arch/riscv/kernel/Makefile            |   2 +-
 arch/riscv/kernel/traps.c             |   9 --
 arch/riscv/kernel/traps_misaligned.c  | 119 +++++++++++++++++++++++---
 5 files changed, 129 insertions(+), 23 deletions(-)
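[Editor's note: for completeness, a minimal userspace check of the behaviour this series implements — with CONFIG_RISCV_MISALIGNED=y the kernel transparently emulates the access; with it disabled, the process gets SIGBUS. This test program is illustrative only and not part of the series.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 4-byte aligned buffer, so buf + 1 is guaranteed misaligned. */
	static uint8_t buf[8] __attribute__((aligned(4))) =
		{ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* Deliberately misaligned 32-bit load; the volatile cast keeps the
	 * compiler from splitting it into byte loads, so on RISC-V it is
	 * typically emitted as a single lw, which traps on hardware without
	 * native misaligned-access support. */
	volatile uint32_t *p = (volatile uint32_t *)(buf + 1);

	printf("loaded 0x%08" PRIx32 "\n", *p);
	return 0;
}

On a little-endian RISC-V system with working emulation this prints "loaded 0x55443322".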