
[RFC,-next,V3,4/6] arm64: add copy_{to, from}_user to machine check safe

Message ID 20220412072552.2526871-5-tongtiangen@huawei.com (mailing list archive)
State New
Series arm64: add machine check safe support

Commit Message

Tong Tiangen April 12, 2022, 7:25 a.m. UTC
Add machine check safe support to copy_{to, from}_user().

If the copy fails due to a hardware memory error, only the relevant
processes are affected, so killing the user process and isolating the user
page with hardware memory errors is a more reasonable choice than a kernel
panic.

Add a new extable type, EX_TYPE_UACCESS_MC, which can be used for uaccess
code that can recover from hardware memory errors.
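
(For illustration only -- a rough C sketch of how a machine-check-aware
fixup path might consume the new extable type, written as if it lived in
arch/arm64/mm/extable.c. The fixup_exception_mc() entry point and the
exact dispatch below are assumptions based on the rest of this series and
the discussion in this thread, not code from this patch.)

bool fixup_exception_mc(struct pt_regs *regs)
{
	const struct exception_table_entry *ex;

	ex = search_exception_tables(instruction_pointer(regs));
	if (!ex)
		return false;

	switch (ex->type) {
	case EX_TYPE_UACCESS_MC:
		/* Only entries explicitly marked machine check safe are honoured. */
		regs->pc = get_ex_fixup(ex);
		return true;
	default:
		/* Any other extable type is not safe to fix up on a memory error. */
		return false;
	}
}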

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/asm-extable.h | 11 +++++++++++
 arch/arm64/include/asm/asm-uaccess.h | 16 ++++++++++++++++
 arch/arm64/lib/copy_from_user.S      | 15 ++++++++++-----
 arch/arm64/lib/copy_to_user.S        | 25 +++++++++++++++++--------
 4 files changed, 54 insertions(+), 13 deletions(-)

Comments

Robin Murphy April 12, 2022, 5:08 p.m. UTC | #1
On 12/04/2022 8:25 am, Tong Tiangen wrote:
[...]
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> index 0557af834e03..bb17f0829042 100644
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ b/arch/arm64/include/asm/asm-uaccess.h
> @@ -92,4 +92,20 @@ alternative_else_nop_endif
>   
>   		_asm_extable	8888b,\l;
>   	.endm
> +
> +	.macro user_ldp_mc l, reg1, reg2, addr, post_inc
> +8888:		ldtr	\reg1, [\addr];
> +8889:		ldtr	\reg2, [\addr, #8];
> +		add	\addr, \addr, \post_inc;
> +
> +		_asm_extable_uaccess_mc	8888b, \l;
> +		_asm_extable_uaccess_mc	8889b, \l;
> +	.endm

You're replacing the only user of this, so please just 
s/_asm_extable/_asm_extable_uaccess_mc/ in the existing macro and save 
the rest of the churn.

Furthermore, how come you're not similarly updating user_stp, given that 
you *are* updating the other stores in copy_to_user?

> +
> +	.macro user_ldst_mc l, inst, reg, addr, post_inc
> +8888:		\inst		\reg, [\addr];
> +		add		\addr, \addr, \post_inc;
> +
> +		_asm_extable_uaccess_mc	8888b, \l;
> +	.endm

Similarly, I think we can just update user_ldst itself. The two 
instances that you're not replacing here are bogus anyway, and deserve 
to be fixed with the patch below first.

[...]
> @@ -62,7 +63,11 @@ SYM_FUNC_START(__arch_copy_from_user)
>   	ret
>   
>   	// Exception fixups
> -9997:	cmp	dst, dstin
> +9997:	mrs esr, esr_el1			// Check exception first
> +	and esr, esr, #ESR_ELx_FSC
> +	cmp esr, #ESR_ELx_FSC_EXTABT

Should we be checking EC to make sure it's a data abort - and thus FSC 
is valid - in the first place? I'm a little fuzzy on all the possible 
paths into fixup_exception(), and it's not entirely obvious whether this 
is actually safe or not.
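
(For reference, the check being discussed would look roughly like the
following in C, using the ESR_ELx_* helpers from <asm/esr.h>. This is an
illustrative sketch of the suggested EC check, not code from the patch.)

/*
 * Only trust the FSC field once the exception class is known to be a
 * data abort, since FSC is only meaningful for abort-class exceptions.
 */
static bool esr_is_sea(unsigned long esr)
{
	unsigned long ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_DABT_LOW)
		return false;

	return (esr & ESR_ELx_FSC) == ESR_ELx_FSC_EXTABT;
}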

Thanks,
Robin.

----->8-----
Subject: [PATCH] arm64: mte: Clean up user tag accessors

Invoking user_ldst to explicitly add a post-increment of 0 is silly.
Just use a normal USER() annotation and save the redundant instruction.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
  arch/arm64/lib/mte.S | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 8590af3c98c0..eeb9e45bcce8 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -93,7 +93,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
  	mov	x3, x1
  	cbz	x2, 2f
  1:
-	user_ldst 2f, ldtrb, w4, x1, 0
+USER(2f, ldtrb	w4, [x1])
  	lsl	x4, x4, #MTE_TAG_SHIFT
  	stg	x4, [x0], #MTE_GRANULE_SIZE
  	add	x1, x1, #1
@@ -120,7 +120,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
  1:
  	ldg	x4, [x1]
  	ubfx	x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
-	user_ldst 2f, sttrb, w4, x0, 0
+USER(2f, sttrb	w4, [x0])
  	add	x0, x0, #1
  	add	x1, x1, #MTE_GRANULE_SIZE
  	subs	x2, x2, #1
Robin Murphy April 12, 2022, 5:17 p.m. UTC | #2
On 12/04/2022 6:08 pm, Robin Murphy wrote:
[...]
>> @@ -62,7 +63,11 @@ SYM_FUNC_START(__arch_copy_from_user)
>>       ret
>>       // Exception fixups
>> -9997:    cmp    dst, dstin
>> +9997:    mrs esr, esr_el1            // Check exception first
>> +    and esr, esr, #ESR_ELx_FSC
>> +    cmp esr, #ESR_ELx_FSC_EXTABT
> 
> Should we be checking EC to make sure it's a data abort - and thus FSC 
> is valid - in the first place? I'm a little fuzzy on all the possible 
> paths into fixup_exception(), and it's not entirely obvious whether this 
> is actually safe or not.

In fact, thinking some more about that, I don't think there should be 
any need for this sort of logic in these handlers at all. The 
fixup_exception() machinery should already know enough about the 
exception that's happened and the extable entry to figure this out and 
not bother calling the handler at all.
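
(Illustrative sketch of that idea: if the distinction is made at the point
where the fixup machinery is invoked, the assembly fixup code never needs
to read ESR_EL1 itself. The helper names below are hypothetical, loosely
based on the fixup_exception_mc() entry point discussed elsewhere in this
thread, and are not existing kernel API.)

/* Hypothetical caller on the synchronous external abort / memory error path. */
static void sea_fixup(struct pt_regs *regs)
{
	/* Only EX_TYPE_UACCESS_MC entries are honoured on this path. */
	if (!fixup_exception_mc(regs))
		die("Uncorrectable hardware memory error in kernel", regs, 0);
}

/* Hypothetical caller on the ordinary fault path. */
static void fault_fixup(struct pt_regs *regs)
{
	/* All extable types, including EX_TYPE_UACCESS_MC, apply here. */
	if (!fixup_exception(regs))
		die("Unhandled kernel fault", regs, 0);
}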

Thanks,
Robin.
Tong Tiangen April 13, 2022, 6:36 a.m. UTC | #3
On 2022/4/13 1:08, Robin Murphy wrote:
> On 12/04/2022 8:25 am, Tong Tiangen wrote:
> [...]
>> diff --git a/arch/arm64/include/asm/asm-uaccess.h 
>> b/arch/arm64/include/asm/asm-uaccess.h
>> index 0557af834e03..bb17f0829042 100644
>> --- a/arch/arm64/include/asm/asm-uaccess.h
>> +++ b/arch/arm64/include/asm/asm-uaccess.h
>> @@ -92,4 +92,20 @@ alternative_else_nop_endif
>>           _asm_extable    8888b,\l;
>>       .endm
>> +
>> +    .macro user_ldp_mc l, reg1, reg2, addr, post_inc
>> +8888:        ldtr    \reg1, [\addr];
>> +8889:        ldtr    \reg2, [\addr, #8];
>> +        add    \addr, \addr, \post_inc;
>> +
>> +        _asm_extable_uaccess_mc    8888b, \l;
>> +        _asm_extable_uaccess_mc    8889b, \l;
>> +    .endm
> 
> You're replacing the only user of this, so please just 
> s/_asm_extable/_asm_extable_uaccess_mc/ in the existing macro and save 
> the rest of the churn.

Agreed. *user_ldp* -- the name already clearly describes the scenarios
where this macro is used, so it is more appropriate to modify it directly.

> 
> Furthermore, how come you're not similarly updating user_stp, given that 
> you *are* updating the other stores in copy_to_user?
> 
>> +
>> +    .macro user_ldst_mc l, inst, reg, addr, post_inc
>> +8888:        \inst        \reg, [\addr];
>> +        add        \addr, \addr, \post_inc;
>> +
>> +        _asm_extable_uaccess_mc    8888b, \l;
>> +    .endm
> 
> Similarly, I think we can just update user_ldst itself. The two 
> instances that you're not replacing here are bogus anyway, and deserve 
> to be fixed with the patch below first.

OK, great, thanks. Will do in the next version.

> 
> [...]
>> @@ -62,7 +63,11 @@ SYM_FUNC_START(__arch_copy_from_user)
>>       ret
>>       // Exception fixups
>> -9997:    cmp    dst, dstin
>> +9997:    mrs esr, esr_el1            // Check exception first
>> +    and esr, esr, #ESR_ELx_FSC
>> +    cmp esr, #ESR_ELx_FSC_EXTABT
> 
> Should we be checking EC to make sure it's a data abort - and thus FSC 
> is valid - in the first place? I'm a little fuzzy on all the possible 
> paths into fixup_exception(), and it's not entirely obvious whether this 
> is actually safe or not.
> 
> Thanks,
> Robin.

I think checking the EC here is more rigorous in terms of code logic, and
it doesn't appear to be harmful.

It is really not appropriate to check the ESR at this stage (it has
already been checked where exception processing starts). At present, I
haven't thought of a better way. If anyone has a better idea, please reply
to me :)

Thanks Robin.
Tong.

> 
> ----->8-----
> Subject: [PATCH] arm64: mte: Clean up user tag accessors
> 
> Invoking user_ldst to explicitly add a post-increment of 0 is silly.
> Just use a normal USER() annotation and save the redundant instruction.
> 
> Signed-off-by: Robin Murphy <robin.murphy@arm.com>
> ---
>   arch/arm64/lib/mte.S | 4 ++--
>   1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
> index 8590af3c98c0..eeb9e45bcce8 100644
> --- a/arch/arm64/lib/mte.S
> +++ b/arch/arm64/lib/mte.S
> @@ -93,7 +93,7 @@ SYM_FUNC_START(mte_copy_tags_from_user)
>       mov    x3, x1
>       cbz    x2, 2f
>   1:
> -    user_ldst 2f, ldtrb, w4, x1, 0
> +USER(2f, ldtrb    w4, [x1])
>       lsl    x4, x4, #MTE_TAG_SHIFT
>       stg    x4, [x0], #MTE_GRANULE_SIZE
>       add    x1, x1, #1
> @@ -120,7 +120,7 @@ SYM_FUNC_START(mte_copy_tags_to_user)
>   1:
>       ldg    x4, [x1]
>       ubfx    x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE
> -    user_ldst 2f, sttrb, w4, x0, 0
> +USER(2f, sttrb    w4, [x0])
>       add    x0, x0, #1
>       add    x1, x1, #MTE_GRANULE_SIZE
>       subs    x2, x2, #1
Tong Tiangen April 13, 2022, 7:30 a.m. UTC | #4
On 2022/4/13 1:08, Robin Murphy wrote:
> On 12/04/2022 8:25 am, Tong Tiangen wrote:
> [...]
>> diff --git a/arch/arm64/include/asm/asm-uaccess.h 
>> b/arch/arm64/include/asm/asm-uaccess.h
>> index 0557af834e03..bb17f0829042 100644
>> --- a/arch/arm64/include/asm/asm-uaccess.h
>> +++ b/arch/arm64/include/asm/asm-uaccess.h
>> @@ -92,4 +92,20 @@ alternative_else_nop_endif
>>           _asm_extable    8888b,\l;
>>       .endm
>> +
>> +    .macro user_ldp_mc l, reg1, reg2, addr, post_inc
>> +8888:        ldtr    \reg1, [\addr];
>> +8889:        ldtr    \reg2, [\addr, #8];
>> +        add    \addr, \addr, \post_inc;
>> +
>> +        _asm_extable_uaccess_mc    8888b, \l;
>> +        _asm_extable_uaccess_mc    8889b, \l;
>> +    .endm
> 
> You're replacing the only user of this, so please just 
> s/_asm_extable/_asm_extable_uaccess_mc/ in the existing macro and save 
> the rest of the churn.
> 
> Furthermore, how come you're not similarly updating user_stp, given that 
> you *are* updating the other stores in copy_to_user?

I think all load/store instructions should be handled.

Generally speaking, a load will receive a SEA (synchronous external abort)
when it consumes a hardware memory error, while a store will not; this
depends on chip behavior.

So adding the store-class instructions to the handling does no harm.

If there is any problem with my understanding, please correct me.

Thanks,
Tong.

> 
>> +
>> +    .macro user_ldst_mc l, inst, reg, addr, post_inc
>> +8888:        \inst        \reg, [\addr];
>> +        add        \addr, \addr, \post_inc;
>> +
>> +        _asm_extable_uaccess_mc    8888b, \l;
>> +    .endm
>
[...]
Tong Tiangen April 16, 2022, 7:41 a.m. UTC | #5
On 2022/4/13 1:17, Robin Murphy wrote:
> On 12/04/2022 6:08 pm, Robin Murphy wrote:
> [...]
>>> @@ -62,7 +63,11 @@ SYM_FUNC_START(__arch_copy_from_user)
>>>       ret
>>>       // Exception fixups
>>> -9997:    cmp    dst, dstin
>>> +9997:    mrs esr, esr_el1            // Check exception first
>>> +    and esr, esr, #ESR_ELx_FSC
>>> +    cmp esr, #ESR_ELx_FSC_EXTABT
>>
>> Should we be checking EC to make sure it's a data abort - and thus FSC 
>> is valid - in the first place? I'm a little fuzzy on all the possible 
>> paths into fixup_exception(), and it's not entirely obvious whether 
>> this is actually safe or not.
> 
> In fact, thinking some more about that, I don't think there should be 
> any need for this sort of logic in these handlers at all. The 
> fixup_exception() machinery should already know enough about the 
> exception that's happened and the extable entry to figure this out and 
> not bother calling the handler at all.
> 
> Thanks,
> Robin.
> .

Hi Robin,
As you said, it seems it's not good to check the ESR here. How about using
the following method? I'd appreciate your suggestions :)

+#define FIXUP_TYPE_NORMAL	0
+#define FIXUP_TYPE_MC		1

arch/arm64/mm/extable.c
static bool ex_handler_fixup(const struct exception_table_entry *ex,
-	struct pt_regs *regs)
+	struct pt_regs *regs, int fixuptype)
{
+	regs->regs[16] = fixuptype;
	[...]
}

bool fixup_exception(struct pt_regs *regs)
{
	[...]
	switch(ex->type) {
	case EX_TYPE_UACCESS_MC:
-		return ex_handler_fixup(ex, regs)
+		return ex_handler_fixup(ex, regs, FIXUP_TYPE_NORMAL)
	break;
	}
	[...]
}

bool fixup_exception_mc(struct pt_regs *regs)
{
	[...]
	switch(ex->type) {
	case EX_TYPE_UACCESS_MC:
-		return ex_handler_fixup(ex, regs)
+		return ex_handler_fixup(ex, regs, FIXUP_TYPE_MC)
	break;
	}
	[...]
}

arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_to_user.S

+fixup_type      .req    x16

// Exception fixups
//x16: fixup type written by ex_handler_fixup
-9997:  cmp     dst, dstin
+9997:	cmp fixup_type, #FIXUP_TYPE_MC
+	b.eq 9998f
+ 	cmp     dst, dstin
  	b.ne    9998f

Thanks,
Tong.

Patch

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index c39f2437e08e..8af4e7cc9578 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -8,6 +8,9 @@ 
 #define EX_TYPE_UACCESS_ERR_ZERO	3
 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD	4
 
+/* _MC indicates that it can be fixed up from machine check errors */
+#define EX_TYPE_UACCESS_MC		5
+
 #ifdef __ASSEMBLY__
 
 #define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
@@ -27,6 +30,14 @@ 
 	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
 	.endm
 
+/*
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault (including a SEA fault) is taken.
+ */
+	.macro          _asm_extable_uaccess_mc, insn, fixup
+	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_MC, 0)
+	.endm
+
 /*
  * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
  * do nothing.
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 0557af834e03..bb17f0829042 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -92,4 +92,20 @@  alternative_else_nop_endif
 
 		_asm_extable	8888b,\l;
 	.endm
+
+	.macro user_ldp_mc l, reg1, reg2, addr, post_inc
+8888:		ldtr	\reg1, [\addr];
+8889:		ldtr	\reg2, [\addr, #8];
+		add	\addr, \addr, \post_inc;
+
+		_asm_extable_uaccess_mc	8888b, \l;
+		_asm_extable_uaccess_mc	8889b, \l;
+	.endm
+
+	.macro user_ldst_mc l, inst, reg, addr, post_inc
+8888:		\inst		\reg, [\addr];
+		add		\addr, \addr, \post_inc;
+
+		_asm_extable_uaccess_mc	8888b, \l;
+	.endm
 #endif
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..e32c0747a5f1 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -21,7 +21,7 @@ 
  */
 
 	.macro ldrb1 reg, ptr, val
-	user_ldst 9998f, ldtrb, \reg, \ptr, \val
+	user_ldst_mc 9998f, ldtrb, \reg, \ptr, \val
 	.endm
 
 	.macro strb1 reg, ptr, val
@@ -29,7 +29,7 @@ 
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9997f, ldtrh, \reg, \ptr, \val
+	user_ldst_mc 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@ 
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9997f, ldtr, \reg, \ptr, \val
+	user_ldst_mc 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@ 
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9997f, \reg1, \reg2, \ptr, \val
+	user_ldp_mc 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
@@ -54,6 +54,7 @@ 
 
 end	.req	x5
 srcin	.req	x15
+esr	.req	x16
 SYM_FUNC_START(__arch_copy_from_user)
 	add	end, x0, x2
 	mov	srcin, x1
@@ -62,7 +63,11 @@  SYM_FUNC_START(__arch_copy_from_user)
 	ret
 
 	// Exception fixups
-9997:	cmp	dst, dstin
+9997:	mrs esr, esr_el1			// Check exception first
+	and esr, esr, #ESR_ELx_FSC
+	cmp esr, #ESR_ELx_FSC_EXTABT
+	b.eq 9998f
+	cmp	dst, dstin
 	b.ne	9998f
 	// Before being absolutely sure we couldn't copy anything, try harder
 USER(9998f, ldtrb tmp1w, [srcin])
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 802231772608..afb53e45a21f 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -20,31 +20,35 @@ 
  *	x0 - bytes not copied
  */
 	.macro ldrb1 reg, ptr, val
-	ldrb  \reg, [\ptr], \val
+	1000:	ldrb  \reg, [\ptr], \val
+	_asm_extable_uaccess_mc 1000b, 9998f;
 	.endm
 
 	.macro strb1 reg, ptr, val
-	user_ldst 9998f, sttrb, \reg, \ptr, \val
+	user_ldst_mc 9998f, sttrb, \reg, \ptr, \val
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	ldrh  \reg, [\ptr], \val
+	1001:	ldrh  \reg, [\ptr], \val
+	_asm_extable_uaccess_mc 1001b, 9998f;
 	.endm
 
 	.macro strh1 reg, ptr, val
-	user_ldst 9997f, sttrh, \reg, \ptr, \val
+	user_ldst_mc 9997f, sttrh, \reg, \ptr, \val
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	ldr \reg, [\ptr], \val
+	1002:	ldr \reg, [\ptr], \val
+	_asm_extable_uaccess_mc 1002b, 9998f;
 	.endm
 
 	.macro str1 reg, ptr, val
-	user_ldst 9997f, sttr, \reg, \ptr, \val
+	user_ldst_mc 9997f, sttr, \reg, \ptr, \val
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	ldp \reg1, \reg2, [\ptr], \val
+	1003:	ldp \reg1, \reg2, [\ptr], \val
+	_asm_extable_uaccess_mc 1003b, 9998f;
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
@@ -53,6 +57,7 @@ 
 
 end	.req	x5
 srcin	.req	x15
+esr	.req	x16
 SYM_FUNC_START(__arch_copy_to_user)
 	add	end, x0, x2
 	mov	srcin, x1
@@ -61,7 +66,11 @@  SYM_FUNC_START(__arch_copy_to_user)
 	ret
 
 	// Exception fixups
-9997:	cmp	dst, dstin
+9997:	mrs esr, esr_el1			// Check exception first
+	and esr, esr, #ESR_ELx_FSC
+	cmp esr, #ESR_ELx_FSC_EXTABT
+	b.eq 9998f
+	cmp	dst, dstin
 	b.ne	9998f
 	// Before being absolutely sure we couldn't copy anything, try harder
 	ldrb	tmp1w, [srcin]