
[v2] riscv: misaligned: disable pagefault before accessing user memory

Message ID 20250106170911.1403467-1-cleger@rivosinc.com (mailing list archive)
State New
Series [v2] riscv: misaligned: disable pagefault before accessing user memory

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/patch-1-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh took 102.56s
conchuod/patch-1-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh took 1027.15s
conchuod/patch-1-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh took 1213.45s
conchuod/patch-1-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh took 15.75s
conchuod/patch-1-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh took 17.49s
conchuod/patch-1-test-6 success .github/scripts/patches/tests/checkpatch.sh took 0.44s
conchuod/patch-1-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh took 35.92s
conchuod/patch-1-test-8 success .github/scripts/patches/tests/header_inline.sh took 0.00s
conchuod/patch-1-test-9 success .github/scripts/patches/tests/kdoc.sh took 0.51s
conchuod/patch-1-test-10 success .github/scripts/patches/tests/module_param.sh took 0.01s
conchuod/patch-1-test-11 success .github/scripts/patches/tests/verify_fixes.sh took 0.02s
conchuod/patch-1-test-12 success .github/scripts/patches/tests/verify_signedoff.sh took 0.02s

Commit Message

Clément Léger Jan. 6, 2025, 5:09 p.m. UTC
Calling copy_{from/to}_user() in interrupt context might sleep, which
triggers the following BUG message:

[   10.377019] BUG: sleeping function called from invalid context at include/linux/uaccess.h:162
[   10.379868] in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 88, name: ssh-keygen
[   10.380009] preempt_count: 0, expected: 0
[   10.380324] CPU: 0 UID: 0 PID: 88 Comm: ssh-keygen Not tainted 6.13.0-rc5-00013-g3435cd5f1331-dirty #19
[   10.380639] Hardware name: riscv-virtio,qemu (DT)
[   10.380798] Call Trace:
[   10.381108] [<ffffffff80013d30>] dump_backtrace+0x1c/0x24
[   10.381690] [<ffffffff800022d8>] show_stack+0x28/0x34
[   10.381812] [<ffffffff8000ee1c>] dump_stack_lvl+0x4a/0x68
[   10.381958] [<ffffffff8000ee4e>] dump_stack+0x14/0x1c
[   10.382047] [<ffffffff80065e0a>] __might_resched+0xfa/0x104
[   10.382172] [<ffffffff80065e56>] __might_sleep+0x42/0x66
[   10.382267] [<ffffffff801b0c5e>] __might_fault+0x1c/0x24
[   10.382363] [<ffffffff804415e4>] _copy_from_user+0x28/0xc2
[   10.382459] [<ffffffff800152ca>] handle_misaligned_load+0x1ca/0x2fc
[   10.382565] [<ffffffff80a033a0>] do_trap_load_misaligned+0x24/0xee
[   10.382714] [<ffffffff80a0dc66>] handle_exception+0x146/0x152

In order to handle user memory access safely from this context, disable
page faults while copying user memory. Although this might lead to copy
failures in some cases (e.g. an offlined page), it is the safest approach
available. While at it, replace __get_user() with get_user() so that the
user address is actually validated before being accessed, and disable
page faults around that call as well.

Fixes: b686ecdeacf6 ("riscv: misaligned: Restrict user access to kernel memory")
Signed-off-by: Clément Léger <cleger@rivosinc.com>

---

V2:
 - Add pagefault disable/enable around get_user()

 arch/riscv/kernel/traps_misaligned.c | 30 +++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
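
For reference, below is a minimal sketch of the pattern the patch applies;
it is not the actual kernel change, and the helper name
read_user_long_nofault is hypothetical. User memory is copied from a
context that must not sleep, so page faults are disabled around the
access, and a non-resident page makes the copy fail instead of sleeping.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical helper illustrating the pattern; not part of this patch. */
static int read_user_long_nofault(const unsigned long __user *uaddr,
				  unsigned long *val)
{
	unsigned long left;

	/* Sleeping is not allowed here, so never try to fault the page in. */
	pagefault_disable();
	left = copy_from_user(val, uaddr, sizeof(*val));
	pagefault_enable();

	/* Non-zero means some bytes could not be copied (fault or bad address). */
	return left ? -EFAULT : 0;
}

With page faults disabled, copy_from_user() returns the number of bytes it
could not copy rather than faulting the page in, which is the copy failure
the commit message accepts as a trade-off.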

Patch

diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..d9a6e44ae745 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -268,7 +268,9 @@  static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
 	int __ret;					\
 							\
 	if (user_mode(regs)) {				\
-		__ret = __get_user(insn, (type __user *) insn_addr); \
+		pagefault_disable();			\
+		__ret = get_user(insn, (type __user *) insn_addr); \
+		pagefault_enable();			\
 	} else {					\
 		insn = *(type *)insn_addr;		\
 		__ret = 0;				\
@@ -355,7 +357,7 @@  static int handle_scalar_misaligned_load(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn;
+	unsigned long insn, copy_len;
 	unsigned long addr = regs->badaddr;
 	int fp = 0, shift = 0, len = 0;
 
@@ -441,7 +443,16 @@  static int handle_scalar_misaligned_load(struct pt_regs *regs)
 
 	val.data_u64 = 0;
 	if (user_mode(regs)) {
-		if (copy_from_user(&val, (u8 __user *)addr, len))
+		/*
+		 * We cannot sleep in exception context. Disable page faults
+		 * to avoid a potential sleep while accessing user memory.
+		 * The side effect is that if the copy would have slept, it
+		 * now fails instead.
+		 */
+		pagefault_disable();
+		copy_len = copy_from_user(&val, (u8 __user *)addr, len);
+		pagefault_enable();
+		if (copy_len)
 			return -1;
 	} else {
 		memcpy(&val, (u8 *)addr, len);
@@ -463,7 +474,7 @@  static int handle_scalar_misaligned_store(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn;
+	unsigned long insn, copy_len;
 	unsigned long addr = regs->badaddr;
 	int len = 0, fp = 0;
 
@@ -539,7 +550,16 @@  static int handle_scalar_misaligned_store(struct pt_regs *regs)
 		return -EOPNOTSUPP;
 
 	if (user_mode(regs)) {
-		if (copy_to_user((u8 __user *)addr, &val, len))
+		/*
+		 * We cannot sleep in exception context. Disable page faults
+		 * to avoid a potential sleep while accessing user memory.
+		 * The side effect is that if the copy would have slept, it
+		 * now fails instead.
+		 */
+		pagefault_disable();
+		copy_len = copy_to_user((u8 __user *)addr, &val, len);
+		pagefault_enable();
+		if (copy_len)
 			return -1;
 	} else {
 		memcpy((u8 *)addr, &val, len);