
[-next,v4,6/7] arm64: add {get, put}_user to machine check safe

Message ID 20220420030418.3189040-7-tongtiangen@huawei.com (mailing list archive)
State New, archived
Series arm64: add machine check safe support

Commit Message

Tong Tiangen April 20, 2022, 3:04 a.m. UTC
Add machine check safe support to {get, put}_user().

If get_user() or put_user() fails due to a hardware memory error, only the
relevant process is affected, so killing the user process and isolating the
faulty user page is a more reasonable choice than a kernel panic.

Add a new extable type, EX_TYPE_UACCESS_MC_ERR_ZERO, which can be used for
uaccesses that are recoverable from hardware memory errors. The difference
from EX_TYPE_UACCESS_MC is that this type also encodes two additional target
registers: one that receives the error code and one that needs to be zeroed.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/asm-extable.h | 14 ++++++++++++++
 arch/arm64/include/asm/uaccess.h     |  4 ++--
 arch/arm64/mm/extable.c              |  4 ++++
 3 files changed, 20 insertions(+), 2 deletions(-)
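
For context, the new type reuses the existing ex_handler_uaccess_err_zero()
fixup (see the extable.c hunk below): it decodes the two register indices
packed by EX_DATA_REG(ERR, ...) and EX_DATA_REG(ZERO, ...), writes -EFAULT
into the error register, clears the zero-target register, and resumes at the
fixup label. A rough sketch of that handler, paraphrased from
arch/arm64/mm/extable.c of this era (treat it as approximate rather than the
exact upstream source):

static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
					struct pt_regs *regs)
{
	int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
	int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);

	/* Report -EFAULT in the "err" register, e.g. %w0 in __get_mem_asm(). */
	pt_regs_write_reg(regs, reg_err, -EFAULT);
	/* Clear the "zero" register so a failed get_user() reads back 0. */
	pt_regs_write_reg(regs, reg_zero, 0);

	/* Resume at the fixup label ("2:" in the uaccess asm). */
	regs->pc = get_ex_fixup(ex);
	return true;
}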

Comments

Mark Rutland May 13, 2022, 3:39 p.m. UTC | #1
On Wed, Apr 20, 2022 at 03:04:17AM +0000, Tong Tiangen wrote:
> Add machine check safe support to {get, put}_user().
> 
> If get_user() or put_user() fails due to a hardware memory error, only the
> relevant process is affected, so killing the user process and isolating the
> faulty user page is a more reasonable choice than a kernel panic.
> 
> Add a new extable type, EX_TYPE_UACCESS_MC_ERR_ZERO, which can be used for
> uaccesses that are recoverable from hardware memory errors. The difference
> from EX_TYPE_UACCESS_MC is that this type also encodes two additional target
> registers: one that receives the error code and one that needs to be zeroed.

Why does this need to be in any way distinct from the existing
EX_TYPE_UACCESS_ERR_ZERO ?

Other than the case where we currently (ab)use that for
copy_{to,from}_kernel_nofault(), where do we *not* want to use
EX_TYPE_UACCESS_ERR_ZERO and *not* recover from a memory error?

Thanks,
Mark.

> 
> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
> ---
>  arch/arm64/include/asm/asm-extable.h | 14 ++++++++++++++
>  arch/arm64/include/asm/uaccess.h     |  4 ++--
>  arch/arm64/mm/extable.c              |  4 ++++
>  3 files changed, 20 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> index 75b2c00e9523..80410899a9ad 100644
> --- a/arch/arm64/include/asm/asm-extable.h
> +++ b/arch/arm64/include/asm/asm-extable.h
> @@ -13,6 +13,7 @@
>  
>  /* _MC indicates that can fixup from machine check errors */
>  #define EX_TYPE_UACCESS_MC		5
> +#define EX_TYPE_UACCESS_MC_ERR_ZERO	6
>  
>  #ifdef __ASSEMBLY__
>  
> @@ -78,6 +79,15 @@
>  #define EX_DATA_REG(reg, gpr)						\
>  	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
>  
> +#define _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, zero)		\
> +	__DEFINE_ASM_GPR_NUMS							\
> +	__ASM_EXTABLE_RAW(#insn, #fixup,					\
> +			  __stringify(EX_TYPE_UACCESS_MC_ERR_ZERO),		\
> +			  "("							\
> +			    EX_DATA_REG(ERR, err) " | "				\
> +			    EX_DATA_REG(ZERO, zero)				\
> +			  ")")
> +
>  #define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
>  	__DEFINE_ASM_GPR_NUMS						\
>  	__ASM_EXTABLE_RAW(#insn, #fixup, 				\
> @@ -90,6 +100,10 @@
>  #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
>  	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
>  
> +
> +#define _ASM_EXTABLE_UACCESS_MC_ERR(insn, fixup, err)			\
> +	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, wzr)
> +
>  #define EX_DATA_REG_DATA_SHIFT	0
>  #define EX_DATA_REG_DATA	GENMASK(4, 0)
>  #define EX_DATA_REG_ADDR_SHIFT	5
> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
> index e8dce0cc5eaa..e41b47df48b0 100644
> --- a/arch/arm64/include/asm/uaccess.h
> +++ b/arch/arm64/include/asm/uaccess.h
> @@ -236,7 +236,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
>  	asm volatile(							\
>  	"1:	" load "	" reg "1, [%2]\n"			\
>  	"2:\n"								\
> -	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
> +	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(1b, 2b, %w0, %w1)		\
>  	: "+r" (err), "=&r" (x)						\
>  	: "r" (addr))
>  
> @@ -325,7 +325,7 @@ do {									\
>  	asm volatile(							\
>  	"1:	" store "	" reg "1, [%2]\n"			\
>  	"2:\n"								\
> -	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
> +	_ASM_EXTABLE_UACCESS_MC_ERR(1b, 2b, %w0)			\
>  	: "+r" (err)							\
>  	: "r" (x), "r" (addr))
>  
> diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
> index 525876c3ebf4..1023ccdb2f89 100644
> --- a/arch/arm64/mm/extable.c
> +++ b/arch/arm64/mm/extable.c
> @@ -88,6 +88,7 @@ bool fixup_exception(struct pt_regs *regs)
>  	case EX_TYPE_BPF:
>  		return ex_handler_bpf(ex, regs);
>  	case EX_TYPE_UACCESS_ERR_ZERO:
> +	case EX_TYPE_UACCESS_MC_ERR_ZERO:
>  		return ex_handler_uaccess_err_zero(ex, regs);
>  	case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
>  		return ex_handler_load_unaligned_zeropad(ex, regs);
> @@ -107,6 +108,9 @@ bool fixup_exception_mc(struct pt_regs *regs)
>  	switch (ex->type) {
>  	case EX_TYPE_UACCESS_MC:
>  		return ex_handler_uaccess_type(ex, regs, FIXUP_TYPE_MC);
> +	case EX_TYPE_UACCESS_MC_ERR_ZERO:
> +		return ex_handler_uaccess_err_zero(ex, regs);
> +
>  	}
>  
>  	return false;
> -- 
> 2.25.1
>
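
For context on Mark's question: on arm64 at the time of this series, both the
real user accessors and the kernel-nofault helpers funnel through the same
__get_mem_asm()/__put_mem_asm() macros, and therefore through the very extable
entry the uaccess.h hunk above changes. A rough, abbreviated sketch of that
layering, paraphrased from arch/arm64/include/asm/uaccess.h (details such as
the ttbr0/TCO toggling are elided, so treat it as approximate):

/* get_user() path: unprivileged load ("ldtr") on user memory. */
#define __raw_get_user(x, ptr, err)					\
do {									\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", x, ptr, err);	/* -> __get_mem_asm() */ \
	uaccess_ttbr0_disable();					\
} while (0)

/*
 * copy_from_kernel_nofault() path: ordinary load ("ldr") on kernel memory,
 * yet it reuses the same __raw_get_mem()/__get_mem_asm() and hence the same
 * extable type - the "(ab)use" referred to above.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gkn_err = 0;						\
									\
	__raw_get_mem("ldr", *((type *)(dst)),				\
		      (__force type *)(src), __gkn_err);		\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)
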
Tong Tiangen May 19, 2022, 7:09 a.m. UTC | #2
On 2022/5/13 23:39, Mark Rutland wrote:
> On Wed, Apr 20, 2022 at 03:04:17AM +0000, Tong Tiangen wrote:
>> Add machine check safe support to {get, put}_user().
>>
>> If get_user() or put_user() fails due to a hardware memory error, only the
>> relevant process is affected, so killing the user process and isolating the
>> faulty user page is a more reasonable choice than a kernel panic.
>>
>> Add a new extable type, EX_TYPE_UACCESS_MC_ERR_ZERO, which can be used for
>> uaccesses that are recoverable from hardware memory errors. The difference
>> from EX_TYPE_UACCESS_MC is that this type also encodes two additional target
>> registers: one that receives the error code and one that needs to be zeroed.
> 
> Why does this need to be in any way distinct from the existing
> EX_TYPE_UACCESS_ERR_ZERO ?
> 
> Other than the case where we currently (ab)use that for
> copy_{to,from}_kernel_nofault(), where do we *not* want to use
> EX_TYPE_UACCESS_ERR_ZERO and *not* recover from a memory error?
> 
> Thanks,
> Mark.

There are some cases (futex, __user_cache_maint(), __user_swpX_asm()) that
use EX_TYPE_UACCESS_ERR_ZERO. For these cases, whether to recover is not yet
decided; let's discuss that in patch 3/7.

Thanks,
Tong.

> 
>>
>> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
>> ---
>>   arch/arm64/include/asm/asm-extable.h | 14 ++++++++++++++
>>   arch/arm64/include/asm/uaccess.h     |  4 ++--
>>   arch/arm64/mm/extable.c              |  4 ++++
>>   3 files changed, 20 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
>> index 75b2c00e9523..80410899a9ad 100644
>> --- a/arch/arm64/include/asm/asm-extable.h
>> +++ b/arch/arm64/include/asm/asm-extable.h
>> @@ -13,6 +13,7 @@
>>   
>>   /* _MC indicates that can fixup from machine check errors */
>>   #define EX_TYPE_UACCESS_MC		5
>> +#define EX_TYPE_UACCESS_MC_ERR_ZERO	6
>>   
>>   #ifdef __ASSEMBLY__
>>   
>> @@ -78,6 +79,15 @@
>>   #define EX_DATA_REG(reg, gpr)						\
>>   	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
>>   
>> +#define _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, zero)		\
>> +	__DEFINE_ASM_GPR_NUMS							\
>> +	__ASM_EXTABLE_RAW(#insn, #fixup,					\
>> +			  __stringify(EX_TYPE_UACCESS_MC_ERR_ZERO),		\
>> +			  "("							\
>> +			    EX_DATA_REG(ERR, err) " | "				\
>> +			    EX_DATA_REG(ZERO, zero)				\
>> +			  ")")
>> +
>>   #define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
>>   	__DEFINE_ASM_GPR_NUMS						\
>>   	__ASM_EXTABLE_RAW(#insn, #fixup, 				\
>> @@ -90,6 +100,10 @@
>>   #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
>>   	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
>>   
>> +
>> +#define _ASM_EXTABLE_UACCESS_MC_ERR(insn, fixup, err)			\
>> +	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, wzr)
>> +
>>   #define EX_DATA_REG_DATA_SHIFT	0
>>   #define EX_DATA_REG_DATA	GENMASK(4, 0)
>>   #define EX_DATA_REG_ADDR_SHIFT	5
>> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
>> index e8dce0cc5eaa..e41b47df48b0 100644
>> --- a/arch/arm64/include/asm/uaccess.h
>> +++ b/arch/arm64/include/asm/uaccess.h
>> @@ -236,7 +236,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
>>   	asm volatile(							\
>>   	"1:	" load "	" reg "1, [%2]\n"			\
>>   	"2:\n"								\
>> -	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
>> +	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(1b, 2b, %w0, %w1)		\
>>   	: "+r" (err), "=&r" (x)						\
>>   	: "r" (addr))
>>   
>> @@ -325,7 +325,7 @@ do {									\
>>   	asm volatile(							\
>>   	"1:	" store "	" reg "1, [%2]\n"			\
>>   	"2:\n"								\
>> -	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
>> +	_ASM_EXTABLE_UACCESS_MC_ERR(1b, 2b, %w0)			\
>>   	: "+r" (err)							\
>>   	: "r" (x), "r" (addr))
>>   
>> diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
>> index 525876c3ebf4..1023ccdb2f89 100644
>> --- a/arch/arm64/mm/extable.c
>> +++ b/arch/arm64/mm/extable.c
>> @@ -88,6 +88,7 @@ bool fixup_exception(struct pt_regs *regs)
>>   	case EX_TYPE_BPF:
>>   		return ex_handler_bpf(ex, regs);
>>   	case EX_TYPE_UACCESS_ERR_ZERO:
>> +	case EX_TYPE_UACCESS_MC_ERR_ZERO:
>>   		return ex_handler_uaccess_err_zero(ex, regs);
>>   	case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
>>   		return ex_handler_load_unaligned_zeropad(ex, regs);
>> @@ -107,6 +108,9 @@ bool fixup_exception_mc(struct pt_regs *regs)
>>   	switch (ex->type) {
>>   	case EX_TYPE_UACCESS_MC:
>>   		return ex_handler_uaccess_type(ex, regs, FIXUP_TYPE_MC);
>> +	case EX_TYPE_UACCESS_MC_ERR_ZERO:
>> +		return ex_handler_uaccess_err_zero(ex, regs);
>> +
>>   	}
>>   
>>   	return false;
>> -- 
>> 2.25.1
>>
> .

Patch

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 75b2c00e9523..80410899a9ad 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -13,6 +13,7 @@ 
 
 /* _MC indicates that can fixup from machine check errors */
 #define EX_TYPE_UACCESS_MC		5
+#define EX_TYPE_UACCESS_MC_ERR_ZERO	6
 
 #ifdef __ASSEMBLY__
 
@@ -78,6 +79,15 @@ 
 #define EX_DATA_REG(reg, gpr)						\
 	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
 
+#define _ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, zero)		\
+	__DEFINE_ASM_GPR_NUMS							\
+	__ASM_EXTABLE_RAW(#insn, #fixup,					\
+			  __stringify(EX_TYPE_UACCESS_MC_ERR_ZERO),		\
+			  "("							\
+			    EX_DATA_REG(ERR, err) " | "				\
+			    EX_DATA_REG(ZERO, zero)				\
+			  ")")
+
 #define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
 	__DEFINE_ASM_GPR_NUMS						\
 	__ASM_EXTABLE_RAW(#insn, #fixup, 				\
@@ -90,6 +100,10 @@ 
 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
 	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
 
+
+#define _ASM_EXTABLE_UACCESS_MC_ERR(insn, fixup, err)			\
+	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(insn, fixup, err, wzr)
+
 #define EX_DATA_REG_DATA_SHIFT	0
 #define EX_DATA_REG_DATA	GENMASK(4, 0)
 #define EX_DATA_REG_ADDR_SHIFT	5
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e8dce0cc5eaa..e41b47df48b0 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -236,7 +236,7 @@  static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 	asm volatile(							\
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
-	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
+	_ASM_EXTABLE_UACCESS_MC_ERR_ZERO(1b, 2b, %w0, %w1)		\
 	: "+r" (err), "=&r" (x)						\
 	: "r" (addr))
 
@@ -325,7 +325,7 @@  do {									\
 	asm volatile(							\
 	"1:	" store "	" reg "1, [%2]\n"			\
 	"2:\n"								\
-	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
+	_ASM_EXTABLE_UACCESS_MC_ERR(1b, 2b, %w0)			\
 	: "+r" (err)							\
 	: "r" (x), "r" (addr))
 
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 525876c3ebf4..1023ccdb2f89 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -88,6 +88,7 @@  bool fixup_exception(struct pt_regs *regs)
 	case EX_TYPE_BPF:
 		return ex_handler_bpf(ex, regs);
 	case EX_TYPE_UACCESS_ERR_ZERO:
+	case EX_TYPE_UACCESS_MC_ERR_ZERO:
 		return ex_handler_uaccess_err_zero(ex, regs);
 	case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
 		return ex_handler_load_unaligned_zeropad(ex, regs);
@@ -107,6 +108,9 @@  bool fixup_exception_mc(struct pt_regs *regs)
 	switch (ex->type) {
 	case EX_TYPE_UACCESS_MC:
 		return ex_handler_uaccess_type(ex, regs, FIXUP_TYPE_MC);
+	case EX_TYPE_UACCESS_MC_ERR_ZERO:
+		return ex_handler_uaccess_err_zero(ex, regs);
+
 	}
 
 	return false;