MIPS: Fix inline assembly in uaccess.h

Message ID: 19656f7bfbc17f90ca4e2f2d576171d7151d6684.1648178000.git.chenfeiyang@loongson.cn
State: New
Series: MIPS: Fix inline assembly in uaccess.h

Commit Message

Feiyang Chen March 25, 2022, 3:15 a.m. UTC
The inline assembly in __put_data_asm() and __put_data_asm_ll32()
treats the memory addresses to be written as input operands, which
may cause the compiler to optimize incorrectly.

Treat these addresses as output operands instead. While at it,
rewrite the inline assembly to improve readability.
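
For illustration only (not part of the patch; the helper names are
hypothetical), a minimal sketch of the two forms:

	/* Input-only form: the store target is listed as an input, so
	 * GCC assumes *p is unchanged by the asm and may reuse a stale
	 * copy of it or reorder accesses around the store.
	 */
	static inline void put_word_unsafe(int *p, int v)
	{
		__asm__ __volatile__("sw %1, %0" : : "o" (*p), "r" (v));
	}

	/* Output form: "=o" tells GCC that the asm writes *p. */
	static inline void put_word(int *p, int v)
	{
		__asm__ __volatile__("sw %1, %0" : "=o" (*p) : "r" (v));
	}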

Signed-off-by: Feiyang Chen <chenfeiyang@loongson.cn>
---
 arch/mips/include/asm/uaccess.h | 38 +++++++++++++++++----------------
 1 file changed, 20 insertions(+), 18 deletions(-)

Comments

Maciej W. Rozycki April 11, 2022, 4:49 p.m. UTC | #1
On Fri, 25 Mar 2022, Feiyang Chen wrote:

> The inline assembly in __put_data_asm() and __put_data_asm_ll32()
> treats the memory addresses to be written as input operands, which
> may cause the compiler to optimize incorrectly.

 This part of the change seems conceptually right and qualifies as a bug 
fix.

> Treat these addresses as output operands instead. While at it,
> rewrite the inline assembly to improve readability.

 However, combining it with improvements or clean-ups makes it impossible 
to accept.  We require that changes be self-contained and made one at a 
time.  So if you'd like to move forward with your proposal, you need to 
split it into individual changes and post them as a patch series, with 
the bug fix first (so that it can be backported as-is) and the clean-ups 
following.

 Please have a look through Documentation/process/submitting-patches.rst 
for further details.

> diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
> index c0cede273c7c..dc5bca09f39a 100644
> --- a/arch/mips/include/asm/uaccess.h
> +++ b/arch/mips/include/asm/uaccess.h
> @@ -207,19 +207,19 @@ struct __large_struct { unsigned long buf[100]; };
>  	long __gu_tmp;							\
>  									\
>  	__asm__ __volatile__(						\
> -	"1:	"insn("%1", "%3")"				\n"	\
> +	"1:	"insn("%1", "%2")"				\n"	\
>  	"2:							\n"	\
>  	"	.insn						\n"	\
>  	"	.section .fixup,\"ax\"				\n"	\
> -	"3:	li	%0, %4					\n"	\
> +	"3:	li	%0, %3					\n"	\
>  	"	move	%1, $0					\n"	\
>  	"	j	2b					\n"	\
>  	"	.previous					\n"	\
>  	"	.section __ex_table,\"a\"			\n"	\
>  	"	"__UA_ADDR "\t1b, 3b				\n"	\
>  	"	.previous					\n"	\
> -	: "=r" (__gu_err), "=r" (__gu_tmp)				\
> -	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
> +	: "+r" (__gu_err), "=r" (__gu_tmp)				\
> +	: "m" (__m(addr)), "i" (-EFAULT));				\

 For example, you remove input operand #2 by making operand #0 an 
input/output one, which is just a syntactic change.  Many years ago GCC 
had issues with input/output operands, which is why we have a separate 
input and a separate output operand here.  I am fairly sure we no longer 
support versions of GCC that had those issues, so it is a clean-up 
perhaps worth making, but such a change has to be made in a separate 
patch.
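
 A sketch of the two equivalent spellings (illustrative only; the 
instruction is a placeholder so the example assembles):

	static inline void err_operand_demo(void)
	{
		int __gu_err;

		/* Old style: a separate output operand plus a
		 * matching-digit input ("0") that also supplies the
		 * initial value.
		 */
		__asm__("addiu %0, %0, 0" : "=r" (__gu_err) : "0" (0));

		/* New style: a single read-write ("+") operand,
		 * initialized from C code.
		 */
		__gu_err = 0;
		__asm__("addiu %0, %0, 0" : "+r" (__gu_err));
	}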

 You also change the `o' constraint here into `m'.  This actually changes 
the semantics, by permitting any memory reference to be used with `insn'.  
This change is probably invalid, as it could turn `insn' into an assembly 
macro.  Consequently the exception table won't work anymore, as its entry 
will cover the instruction at `1:', which could be a LUI in 32-bit code, 
while the memory access that can actually fault follows with no entry of 
its own.
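
 To illustrate the hazard (a hypothetical example, not from the patch): 
with `m' GCC may hand the assembler a bare symbol address, which gas then 
expands as a macro:

	extern int some_var;

	int demo(void)
	{
		int v;

		/* Depending on the relocation mode this may assemble as
		 *
		 *	1:	lui	$1, %hi(some_var)
		 *		lw	$2, %lo(some_var)($1)
		 *
		 * so an __ex_table entry for 1: would cover the LUI,
		 * not the load that can actually fault.
		 */
		__asm__("1:	lw	%0, %1" : "=r" (v) : "m" (some_var));
		return v;
	}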

 However the use of the `o' constraint isn't completely safe here either, 
because with the MIPS target it doesn't guarantee that an assembly 
instruction will be used that produces a single machine instruction.  The 
reason for using `o' regardless is again GCC, which used not to provide 
better means, or the ones it did provide were buggy.

 What we ought to use nowadays is `R' if non-EVA; more problematically, 
GCC doesn't actually provide a suitable constraint for the EVA case 
(where only a 9-bit offset is available in the regular MIPS instruction 
encoding; for microMIPS code `ZC' would do), so we'd have to resort to a 
hack there until GCC has been improved.  Obviously our code has worked by 
chance and I find it rather interesting that nobody has noticed the lack 
of a suitable constraint for EVA operations in all these years.
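
 A sketch of the non-EVA direction using `R', which GCC's MIPS machine 
constraints document as "an address that can be used in a non-macro load 
or store" (fixup and __ex_table parts omitted for brevity):

	static inline int get_word(const int *addr)
	{
		int val;

		/* "R" guarantees a single-instruction memory reference,
		 * so the 1: label reliably covers the load itself.
		 */
		__asm__("1:	lw	%0, %1"
			: "=r" (val)
			: "R" (*addr));
		return val;
	}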

 Overall, given that these accesses, and certainly the user ones, will be 
made via pointers, I think that for safety the change needs to go in the 
opposite direction and code the memory reference with an explicit base 
and offset, as in `__get_data_asm_ll32'.
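
 For reference, a sketch (fixup and __ex_table parts again omitted) of 
that explicit base-and-offset form, mirroring `__get_data_asm_ll32'; on 
32-bit MIPS `%D1' names the second register of the 64-bit pair:

	static inline int get_dword(const void *addr, unsigned long long *out)
	{
		int err = 0;
		unsigned long long tmp;

		__asm__("1:	lw	%1, 0(%2)	\n"
			"2:	lw	%D1, 4(%2)	\n"
			: "+r" (err), "=&r" (tmp)
			: "r" (addr));
		*out = tmp;
		return err;
	}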

  Maciej

Patch

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c0cede273c7c..dc5bca09f39a 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -207,19 +207,19 @@  struct __large_struct { unsigned long buf[100]; };
 	long __gu_tmp;							\
 									\
 	__asm__ __volatile__(						\
-	"1:	"insn("%1", "%3")"				\n"	\
+	"1:	"insn("%1", "%2")"				\n"	\
 	"2:							\n"	\
 	"	.insn						\n"	\
 	"	.section .fixup,\"ax\"				\n"	\
-	"3:	li	%0, %4					\n"	\
+	"3:	li	%0, %3					\n"	\
 	"	move	%1, $0					\n"	\
 	"	j	2b					\n"	\
 	"	.previous					\n"	\
 	"	.section __ex_table,\"a\"			\n"	\
 	"	"__UA_ADDR "\t1b, 3b				\n"	\
 	"	.previous					\n"	\
-	: "=r" (__gu_err), "=r" (__gu_tmp)				\
-	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
+	: "+r" (__gu_err), "=r" (__gu_tmp)				\
+	: "m" (__m(addr)), "i" (-EFAULT));				\
 									\
 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
 }
@@ -234,9 +234,11 @@  struct __large_struct { unsigned long buf[100]; };
 		__typeof__(*(addr))	t;				\
 	} __gu_tmp;							\
 									\
+	void *__addr = (void *)addr;					\
+									\
 	__asm__ __volatile__(						\
-	"1:	" insn("%1", "(%3)")"				\n"	\
-	"2:	" insn("%D1", "4(%3)")"				\n"	\
+	"1:	" insn("%1", "%2")"				\n"	\
+	"2:	" insn("%D1", "%3")"				\n"	\
 	"3:							\n"	\
 	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
@@ -249,8 +251,8 @@  struct __large_struct { unsigned long buf[100]; };
 	"	" __UA_ADDR "	1b, 4b				\n"	\
 	"	" __UA_ADDR "	2b, 4b				\n"	\
 	"	.previous					\n"	\
-	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
-	: "0" (0), "r" (addr), "i" (-EFAULT));				\
+	: "+r" (__gu_err), "=&r" (__gu_tmp.l)				\
+	: "m" (__m(__addr)), "m" (__m(__addr + 4)), "i" (-EFAULT));	\
 									\
 	(val) = __gu_tmp.t;						\
 }
@@ -298,26 +300,27 @@  do {									\
 #define __put_data_asm(insn, ptr)					\
 {									\
 	__asm__ __volatile__(						\
-	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
+	"1:	"insn("%z2", "%1")"	# __put_data_asm	\n"	\
 	"2:							\n"	\
 	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
-	"3:	li	%0, %4					\n"	\
+	"3:	li	%0, %3					\n"	\
 	"	j	2b					\n"	\
 	"	.previous					\n"	\
 	"	.section	__ex_table,\"a\"		\n"	\
 	"	" __UA_ADDR "	1b, 3b				\n"	\
 	"	.previous					\n"	\
-	: "=r" (__pu_err)						\
-	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
-	  "i" (-EFAULT));						\
+	: "+r" (__pu_err), "=m" (__m(ptr))				\
+	: "Jr" (__pu_val), "i" (-EFAULT));				\
 }
 
 #define __put_data_asm_ll32(insn, ptr)					\
 {									\
+	void *__ptr = (void *)ptr;					\
+									\
 	__asm__ __volatile__(						\
-	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
-	"2:	"insn("%D2", "4(%3)")"				\n"	\
+	"1:	"insn("%3", "%1")"	# __put_data_asm_ll32	\n"	\
+	"2:	"insn("%D3", "%2")"				\n"	\
 	"3:							\n"	\
 	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
@@ -328,9 +331,8 @@  do {									\
 	"	" __UA_ADDR "	1b, 4b				\n"	\
 	"	" __UA_ADDR "	2b, 4b				\n"	\
 	"	.previous"						\
-	: "=r" (__pu_err)						\
-	: "0" (0), "r" (__pu_val), "r" (ptr),				\
-	  "i" (-EFAULT));						\
+	: "+r" (__pu_err), "=m" (__m(__ptr)), "=m" (__m(__ptr + 4))	\
+	: "r" (__pu_val),  "i" (-EFAULT));				\
 }
 
 #define __put_kernel_nofault(dst, src, type, err_label)			\