| Message ID | 20220406091311.3354723-5-tongtiangen@huawei.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | arm64: add machine check safe support |
On Wed, Apr 06, 2022 at 09:13:08AM +0000, Tong Tiangen wrote:
> Add scenarios copy_from_user to machine check safe.
>
> The data copied is user data and is machine check safe, so just kill
> the user process and isolate the error page, not necessary panic.
>
> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
> ---
>  arch/arm64/include/asm/asm-uaccess.h | 16 ++++++++++++++++
>  arch/arm64/lib/copy_from_user.S      | 11 ++++++-----
>  2 files changed, 22 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> index 0557af834e03..f31c8978e1af 100644
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ b/arch/arm64/include/asm/asm-uaccess.h
> @@ -92,4 +92,20 @@ alternative_else_nop_endif
>
>  	_asm_extable	8888b,\l;
>  	.endm
> +
> +	.macro user_ldp_mc l, reg1, reg2, addr, post_inc
> +8888:		ldtr	\reg1, [\addr];
> +8889:		ldtr	\reg2, [\addr, #8];
> +		add	\addr, \addr, \post_inc;
> +
> +		_asm_extable_mc	8888b, \l;
> +		_asm_extable_mc	8889b, \l;
> +	.endm
> +
> +	.macro user_ldst_mc l, inst, reg, addr, post_inc
> +8888:		\inst	\reg, [\addr];
> +		add	\addr, \addr, \post_inc;
> +
> +		_asm_extable_mc	8888b, \l;
> +	.endm
>  #endif
> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
> index 34e317907524..d9d7c5291871 100644
> --- a/arch/arm64/lib/copy_from_user.S
> +++ b/arch/arm64/lib/copy_from_user.S
> @@ -21,7 +21,7 @@
>   */
>
>  	.macro ldrb1 reg, ptr, val
> -	user_ldst 9998f, ldtrb, \reg, \ptr, \val
> +	user_ldst_mc 9998f, ldtrb, \reg, \ptr, \val
>  	.endm
>
>  	.macro strb1 reg, ptr, val
> @@ -29,7 +29,7 @@
>  	.endm
>
>  	.macro ldrh1 reg, ptr, val
> -	user_ldst 9997f, ldtrh, \reg, \ptr, \val
> +	user_ldst_mc 9997f, ldtrh, \reg, \ptr, \val
>  	.endm
>
>  	.macro strh1 reg, ptr, val
> @@ -37,7 +37,7 @@
>  	.endm
>
>  	.macro ldr1 reg, ptr, val
> -	user_ldst 9997f, ldtr, \reg, \ptr, \val
> +	user_ldst_mc 9997f, ldtr, \reg, \ptr, \val
>  	.endm
>
>  	.macro str1 reg, ptr, val
> @@ -45,7 +45,7 @@
>  	.endm
>
>  	.macro ldp1 reg1, reg2, ptr, val
> -	user_ldp 9997f, \reg1, \reg2, \ptr, \val
> +	user_ldp_mc 9997f, \reg1, \reg2, \ptr, \val
>  	.endm
>
>  	.macro stp1 reg1, reg2, ptr, val
> @@ -62,7 +62,8 @@ SYM_FUNC_START(__arch_copy_from_user)
>  	ret
>
>  	// Exception fixups
> -9997:	cmp	dst, dstin
> +9997:	cbz	x0, 9998f	// Check machine check exception
> +	cmp	dst, dstin
>  	b.ne	9998f

If you look at the copy template, you'd see that `dstin` *is* x0.

Consider if we took a non-SEA fault. Then the fixup handler will overwrite x0,
it's likely `dst` != `dstin`, and we'll branch to the byte-by-byte copy. Or if
we're doing something odd and mmap_min_addr is 0, we can do the wrong thing the
other way around and *not* branch to the byte-by-byte copy when we should.

So this is at best confusing, but likely broken too.

Thanks,
Mark.

>  	// Before being absolutely sure we couldn't copy anything, try harder
>  	USER(9998f, ldtrb tmp1w, [srcin])
> --
> 2.18.0.huawei.25
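For context on the `dstin` point above: copy_from_user.S pulls in the shared copy template, which defines register aliases for the copy loop. Below is a minimal sketch of those aliases; `dstin` being x0 is the key fact (and is confirmed by Mark), while the other register numbers are quoted from memory and should be checked against arch/arm64/lib/copy_template.S and copy_from_user.S.

```asm
/*
 * Sketch of the register aliases used by the copy routines (not a
 * verbatim excerpt). Because dstin is x0, a fixup handler that
 * overwrites x0 on an ordinary (non-SEA) fault also clobbers dstin,
 * so both the "cbz x0, 9998f" test and the later "cmp dst, dstin"
 * end up looking at a value the handler just wrote.
 */
dstin	.req	x0	// destination pointer as passed by the caller
src	.req	x1	// current source pointer
count	.req	x2	// number of bytes to copy
end	.req	x5	// dstin + count, used to compute bytes not copied
dst	.req	x6	// current destination pointer
srcin	.req	x15	// original source, used by the final ldtrb probe
```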
On 2022/4/6 19:19, Mark Rutland wrote:
> On Wed, Apr 06, 2022 at 09:13:08AM +0000, Tong Tiangen wrote:
>> Add scenarios copy_from_user to machine check safe.
>>
>> The data copied is user data and is machine check safe, so just kill
>> the user process and isolate the error page, not necessary panic.
>>
>> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
>> ---
>>  arch/arm64/include/asm/asm-uaccess.h | 16 ++++++++++++++++
>>  arch/arm64/lib/copy_from_user.S      | 11 ++++++-----
>>  2 files changed, 22 insertions(+), 5 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
>> index 0557af834e03..f31c8978e1af 100644
>> --- a/arch/arm64/include/asm/asm-uaccess.h
>> +++ b/arch/arm64/include/asm/asm-uaccess.h
>> @@ -92,4 +92,20 @@ alternative_else_nop_endif
>>
>>  	_asm_extable	8888b,\l;
>>  	.endm
>> +
>> +	.macro user_ldp_mc l, reg1, reg2, addr, post_inc
>> +8888:		ldtr	\reg1, [\addr];
>> +8889:		ldtr	\reg2, [\addr, #8];
>> +		add	\addr, \addr, \post_inc;
>> +
>> +		_asm_extable_mc	8888b, \l;
>> +		_asm_extable_mc	8889b, \l;
>> +	.endm
>> +
>> +	.macro user_ldst_mc l, inst, reg, addr, post_inc
>> +8888:		\inst	\reg, [\addr];
>> +		add	\addr, \addr, \post_inc;
>> +
>> +		_asm_extable_mc	8888b, \l;
>> +	.endm
>>  #endif
>> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
>> index 34e317907524..d9d7c5291871 100644
>> --- a/arch/arm64/lib/copy_from_user.S
>> +++ b/arch/arm64/lib/copy_from_user.S
>> @@ -21,7 +21,7 @@
>>   */
>>
>>  	.macro ldrb1 reg, ptr, val
>> -	user_ldst 9998f, ldtrb, \reg, \ptr, \val
>> +	user_ldst_mc 9998f, ldtrb, \reg, \ptr, \val
>>  	.endm
>>
>>  	.macro strb1 reg, ptr, val
>> @@ -29,7 +29,7 @@
>>  	.endm
>>
>>  	.macro ldrh1 reg, ptr, val
>> -	user_ldst 9997f, ldtrh, \reg, \ptr, \val
>> +	user_ldst_mc 9997f, ldtrh, \reg, \ptr, \val
>>  	.endm
>>
>>  	.macro strh1 reg, ptr, val
>> @@ -37,7 +37,7 @@
>>  	.endm
>>
>>  	.macro ldr1 reg, ptr, val
>> -	user_ldst 9997f, ldtr, \reg, \ptr, \val
>> +	user_ldst_mc 9997f, ldtr, \reg, \ptr, \val
>>  	.endm
>>
>>  	.macro str1 reg, ptr, val
>> @@ -45,7 +45,7 @@
>>  	.endm
>>
>>  	.macro ldp1 reg1, reg2, ptr, val
>> -	user_ldp 9997f, \reg1, \reg2, \ptr, \val
>> +	user_ldp_mc 9997f, \reg1, \reg2, \ptr, \val
>>  	.endm
>>
>>  	.macro stp1 reg1, reg2, ptr, val
>> @@ -62,7 +62,8 @@ SYM_FUNC_START(__arch_copy_from_user)
>>  	ret
>>
>>  	// Exception fixups
>> -9997:	cmp	dst, dstin
>> +9997:	cbz	x0, 9998f	// Check machine check exception
>> +	cmp	dst, dstin
>>  	b.ne	9998f
>
> If you look at the copy template, you'd see that `dstin` *is* x0.
>
> Consider if we took a non-SEA fault. Then the fixup handler will overwrite x0,
> it's likely `dst` != `dstin`, and we'll branch to the byte-by-byte copy. Or if
> we're doing something odd and mmap_min_addr is 0, we can do the wrong thing the
> other way around and *not* branch to the byte-by-byte copy when we should.
>
> So this is at best confusing, but likely broken too.
>
> Thanks,
> Mark.

OK, missed that, will be fixed in the next version.

Thanks,
Tong.

>>  	// Before being absolutely sure we couldn't copy anything, try harder
>>  	USER(9998f, ldtrb tmp1w, [srcin])
>> --
>> 2.18.0.huawei.25
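One possible shape for such a fix (an illustrative sketch only, not the actual follow-up patch; the `9999` label and its exact body are hypothetical) would be to give the machine-check extable entries their own fixup target, so the ordinary-fault path never has to test x0:

```asm
	// Exception fixups for ordinary faults: existing retry logic.
9997:	cmp	dst, dstin
	b.ne	9998f
	// Before being absolutely sure we couldn't copy anything, try harder
USER(9998f, ldtrb tmp1w, [srcin])
	strb	tmp1w, [dst], #1
9998:	sub	x0, end, dst			// bytes not copied
	ret

	// Machine-check (SEA) fixups: the user_ldst_mc/user_ldp_mc call
	// sites would pass 9999f instead of 9997f/9998f, so we skip the
	// byte-by-byte retry and never re-touch the poisoned page.
9999:	sub	x0, end, dst			// bytes not copied
	ret
```

The idea is simply that the SEA/non-SEA distinction is carried by which fixup label the extable entry points at, rather than by a value left in x0; the real next version of the series may well solve it differently.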
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 0557af834e03..f31c8978e1af 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -92,4 +92,20 @@ alternative_else_nop_endif
 
 	_asm_extable	8888b,\l;
 	.endm
+
+	.macro user_ldp_mc l, reg1, reg2, addr, post_inc
+8888:		ldtr	\reg1, [\addr];
+8889:		ldtr	\reg2, [\addr, #8];
+		add	\addr, \addr, \post_inc;
+
+		_asm_extable_mc	8888b, \l;
+		_asm_extable_mc	8889b, \l;
+	.endm
+
+	.macro user_ldst_mc l, inst, reg, addr, post_inc
+8888:		\inst	\reg, [\addr];
+		add	\addr, \addr, \post_inc;
+
+		_asm_extable_mc	8888b, \l;
+	.endm
 #endif
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..d9d7c5291871 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -21,7 +21,7 @@
  */
 
 	.macro ldrb1 reg, ptr, val
-	user_ldst 9998f, ldtrb, \reg, \ptr, \val
+	user_ldst_mc 9998f, ldtrb, \reg, \ptr, \val
 	.endm
 
 	.macro strb1 reg, ptr, val
@@ -29,7 +29,7 @@
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9997f, ldtrh, \reg, \ptr, \val
+	user_ldst_mc 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9997f, ldtr, \reg, \ptr, \val
+	user_ldst_mc 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9997f, \reg1, \reg2, \ptr, \val
+	user_ldp_mc 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
@@ -62,7 +62,8 @@ SYM_FUNC_START(__arch_copy_from_user)
 	ret
 
 	// Exception fixups
-9997:	cmp	dst, dstin
+9997:	cbz	x0, 9998f	// Check machine check exception
+	cmp	dst, dstin
 	b.ne	9998f
 	// Before being absolutely sure we couldn't copy anything, try harder
 	USER(9998f, ldtrb tmp1w, [srcin])
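To make the new macros concrete, here is a hand expansion of one call site from the hunk above, `user_ldst_mc 9998f, ldtrb, \reg, \ptr, \val` as used by `ldrb1`. The registers w3/x1 and the `#1` post-increment are illustrative placeholders, and `_asm_extable_mc` is taken on faith from the earlier patch in this series that introduces it:

```asm
8888:	ldtrb	w3, [x1]		// unprivileged (LDTR-class) load of one byte from user memory
	add	x1, x1, #1		// apply the template's post-increment

	// Record "if the instruction at 8888 faults, resume at 9998" in the
	// machine-check-aware exception table added earlier in the series.
	_asm_extable_mc	8888b, 9998f
```

The expansion is mechanical; the only new ingredient relative to the existing `user_ldst` macro is that the extable entry goes into the machine-check-aware table rather than the ordinary one.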
Add scenarios copy_from_user to machine check safe.

The data copied is user data and is machine check safe, so just kill
the user process and isolate the error page, not necessary panic.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 16 ++++++++++++++++
 arch/arm64/lib/copy_from_user.S      | 11 ++++++-----
 2 files changed, 22 insertions(+), 5 deletions(-)