Message ID | 1639143361-17773-2-git-send-email-yangtiezhu@loongson.cn
---|---
State | New, archived
Series | kdump: simplify code
On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:

> In arch/*/kernel/crash_dump*.c, there exist similar code about
> copy_oldmem_page(), move copy_to() from vmcore.c to uaccess.h,
> and then we can use copy_to() to simplify the related code.
>
> ...
>
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>  	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>  }
>
> -/*
> - * Copy to either kernel or user space
> - */
> -static int copy_to(void *target, void *src, size_t size, int userbuf)
> -{
> -	if (userbuf) {
> -		if (copy_to_user((char __user *) target, src, size))
> -			return -EFAULT;
> -	} else {
> -		memcpy(target, src, size);
> -	}
> -	return 0;
> -}
> -
>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>  static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>  {
> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> index ac03940..4a6c3e4 100644
> --- a/include/linux/uaccess.h
> +++ b/include/linux/uaccess.h
> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>  	return n;
>  }
>
> +/*
> + * Copy to either kernel or user space
> + */
> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
> +{
> +	if (userbuf) {
> +		if (copy_to_user((char __user *) target, src, size))
> +			return -EFAULT;
> +	} else {
> +		memcpy(target, src, size);
> +	}
> +	return 0;
> +}
> +

Ordinarily I'd say "this is too large to be inlined". But the function
has only a single callsite per architecture so inlining it won't cause
bloat at present.

But hopefully copy_to() will get additional callers in the future, in
which case it shouldn't be inlined. So I'm thinking it would be best
to start out with this as a regular non-inlined function, in
lib/usercopy.c.

Also, copy_to() is a very poor name for a globally-visible helper
function. Better would be copy_to_user_or_kernel(), although that's
perhaps a bit long.

And the `userbuf' arg should have type bool, yes?
On 12/11/2021 12:59 AM, Andrew Morton wrote:
> On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>
>> In arch/*/kernel/crash_dump*.c, there exist similar code about
>> copy_oldmem_page(), move copy_to() from vmcore.c to uaccess.h,
>> and then we can use copy_to() to simplify the related code.
>>
>> ...
>>
>> --- a/fs/proc/vmcore.c
>> +++ b/fs/proc/vmcore.c
>> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>>  	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>>  }
>>
>> -/*
>> - * Copy to either kernel or user space
>> - */
>> -static int copy_to(void *target, void *src, size_t size, int userbuf)
>> -{
>> -	if (userbuf) {
>> -		if (copy_to_user((char __user *) target, src, size))
>> -			return -EFAULT;
>> -	} else {
>> -		memcpy(target, src, size);
>> -	}
>> -	return 0;
>> -}
>> -
>>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>>  static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>>  {
>> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
>> index ac03940..4a6c3e4 100644
>> --- a/include/linux/uaccess.h
>> +++ b/include/linux/uaccess.h
>> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>>  	return n;
>>  }
>>
>> +/*
>> + * Copy to either kernel or user space
>> + */
>> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
>> +{
>> +	if (userbuf) {
>> +		if (copy_to_user((char __user *) target, src, size))
>> +			return -EFAULT;
>> +	} else {
>> +		memcpy(target, src, size);
>> +	}
>> +	return 0;
>> +}
>> +
>
> Ordinarily I'd say "this is too large to be inlined". But the function
> has only a single callsite per architecture so inlining it won't cause
> bloat at present.
>
> But hopefully copy_to() will get additional callers in the future, in
> which case it shouldn't be inlined. So I'm thinking it would be best
> to start out with this as a regular non-inlined function, in
> lib/usercopy.c.
>
> Also, copy_to() is a very poor name for a globally-visible helper
> function. Better would be copy_to_user_or_kernel(), although that's
> perhaps a bit long.
>
> And the `userbuf' arg should have type bool, yes?
>

Hi Andrew,

Thank you very much for your reply and suggestion, I agree with you,
I will send v2 later.

Thanks,
Tiezhu
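For reference, below is a minimal sketch of the shape Andrew is suggesting: the helper moved out of line into lib/usercopy.c, renamed to copy_to_user_or_kernel(), and with the selector argument changed to bool. This is only an illustration of the suggestion, not the actual v2 patch; the final name, argument order and placement may differ.

#include <linux/export.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Copy to either kernel or user space.
 * Sketch only: keeps the (target, src, size, userbuf) order of the old
 * copy_to(), but as a regular non-inlined function with a bool selector.
 */
int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf)
{
        if (userbuf) {
                /* Destination is a user-space buffer: the copy may fault. */
                if (copy_to_user((void __user *)target, src, size))
                        return -EFAULT;
        } else {
                /* Destination is a kernel buffer: a plain memcpy() is enough. */
                memcpy(target, src, size);
        }

        return 0;
}
EXPORT_SYMBOL(copy_to_user_or_kernel);

A matching declaration would then live in include/linux/uaccess.h, e.g.:

int copy_to_user_or_kernel(void *target, void *src, size_t size, bool userbuf);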
On 10/12/2021 at 17:59, Andrew Morton wrote:
> On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>
>> In arch/*/kernel/crash_dump*.c, there exist similar code about
>> copy_oldmem_page(), move copy_to() from vmcore.c to uaccess.h,
>> and then we can use copy_to() to simplify the related code.
>>
>> ...
>>
>> --- a/fs/proc/vmcore.c
>> +++ b/fs/proc/vmcore.c
>> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>>  	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>>  }
>>
>> -/*
>> - * Copy to either kernel or user space
>> - */
>> -static int copy_to(void *target, void *src, size_t size, int userbuf)
>> -{
>> -	if (userbuf) {
>> -		if (copy_to_user((char __user *) target, src, size))
>> -			return -EFAULT;
>> -	} else {
>> -		memcpy(target, src, size);
>> -	}
>> -	return 0;
>> -}
>> -
>>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>>  static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>>  {
>> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
>> index ac03940..4a6c3e4 100644
>> --- a/include/linux/uaccess.h
>> +++ b/include/linux/uaccess.h
>> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>>  	return n;
>>  }
>>
>> +/*
>> + * Copy to either kernel or user space
>> + */
>> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
>> +{
>> +	if (userbuf) {
>> +		if (copy_to_user((char __user *) target, src, size))
>> +			return -EFAULT;
>> +	} else {
>> +		memcpy(target, src, size);
>> +	}
>> +	return 0;
>> +}
>> +
>
> Ordinarily I'd say "this is too large to be inlined". But the function
> has only a single callsite per architecture so inlining it won't cause
> bloat at present.
>
> But hopefully copy_to() will get additional callers in the future, in
> which case it shouldn't be inlined. So I'm thinking it would be best
> to start out with this as a regular non-inlined function, in
> lib/usercopy.c.
>
> Also, copy_to() is a very poor name for a globally-visible helper
> function. Better would be copy_to_user_or_kernel(), although that's
> perhaps a bit long.
>
> And the `userbuf' arg should have type bool, yes?
>

I think keeping it inlined is better.

copy_oldmem_page() is bigger with v2 (outlined) than with v1 (inlined),
see both below:

v1:

00000000 <copy_oldmem_page>:
   0: 94 21 ff e0   stwu r1,-32(r1)
   4: 93 e1 00 1c   stw r31,28(r1)
   8: 7c bf 2b 79   mr. r31,r5
   c: 40 82 00 14   bne 20 <copy_oldmem_page+0x20>
  10: 83 e1 00 1c   lwz r31,28(r1)
  14: 38 60 00 00   li r3,0
  18: 38 21 00 20   addi r1,r1,32
  1c: 4e 80 00 20   blr
  20: 28 1f 10 00   cmplwi r31,4096
  24: 93 61 00 0c   stw r27,12(r1)
  28: 7c 08 02 a6   mflr r0
  2c: 93 81 00 10   stw r28,16(r1)
  30: 93 a1 00 14   stw r29,20(r1)
  34: 7c 9b 23 78   mr r27,r4
  38: 90 01 00 24   stw r0,36(r1)
  3c: 7c dd 33 78   mr r29,r6
  40: 93 c1 00 18   stw r30,24(r1)
  44: 7c fc 3b 78   mr r28,r7
  48: 40 81 00 08   ble 50 <copy_oldmem_page+0x50>
  4c: 3b e0 10 00   li r31,4096
  50: 54 7e 60 26   rlwinm r30,r3,12,0,19
  54: 7f c3 f3 78   mr r3,r30
  58: 7f e4 fb 78   mr r4,r31
  5c: 48 00 00 01   bl 5c <copy_oldmem_page+0x5c>
        5c: R_PPC_REL24 memblock_is_region_memory
  60: 2c 03 00 00   cmpwi r3,0
  64: 41 82 00 30   beq 94 <copy_oldmem_page+0x94>
  68: 2c 1c 00 00   cmpwi r28,0
  6c: 3f de c0 00   addis r30,r30,-16384
  70: 7f 63 db 78   mr r3,r27
  74: 7f e5 fb 78   mr r5,r31
  78: 7c 9e ea 14   add r4,r30,r29
  7c: 41 82 00 7c   beq f8 <copy_oldmem_page+0xf8>
  80: 48 00 00 01   bl 80 <copy_oldmem_page+0x80>
        80: R_PPC_REL24 _copy_to_user
  84: 2c 03 00 00   cmpwi r3,0
  88: 41 a2 00 48   beq d0 <copy_oldmem_page+0xd0>
  8c: 3b e0 ff f2   li r31,-14
  90: 48 00 00 40   b d0 <copy_oldmem_page+0xd0>
  94: 7f c3 f3 78   mr r3,r30
  98: 38 a0 05 91   li r5,1425
  9c: 38 80 10 00   li r4,4096
  a0: 48 00 00 01   bl a0 <copy_oldmem_page+0xa0>
        a0: R_PPC_REL24 ioremap_prot
  a4: 2c 1c 00 00   cmpwi r28,0
  a8: 7c 7e 1b 78   mr r30,r3
  ac: 7c 83 ea 14   add r4,r3,r29
  b0: 7f e5 fb 78   mr r5,r31
  b4: 7f 63 db 78   mr r3,r27
  b8: 41 82 00 48   beq 100 <copy_oldmem_page+0x100>
  bc: 48 00 00 01   bl bc <copy_oldmem_page+0xbc>
        bc: R_PPC_REL24 _copy_to_user
  c0: 2c 03 00 00   cmpwi r3,0
  c4: 40 82 00 44   bne 108 <copy_oldmem_page+0x108>
  c8: 7f c3 f3 78   mr r3,r30
  cc: 48 00 00 01   bl cc <copy_oldmem_page+0xcc>
        cc: R_PPC_REL24 iounmap
  d0: 80 01 00 24   lwz r0,36(r1)
  d4: 7f e3 fb 78   mr r3,r31
  d8: 83 61 00 0c   lwz r27,12(r1)
  dc: 83 81 00 10   lwz r28,16(r1)
  e0: 7c 08 03 a6   mtlr r0
  e4: 83 a1 00 14   lwz r29,20(r1)
  e8: 83 c1 00 18   lwz r30,24(r1)
  ec: 83 e1 00 1c   lwz r31,28(r1)
  f0: 38 21 00 20   addi r1,r1,32
  f4: 4e 80 00 20   blr
  f8: 48 00 00 01   bl f8 <copy_oldmem_page+0xf8>
        f8: R_PPC_REL24 memcpy
  fc: 4b ff ff d4   b d0 <copy_oldmem_page+0xd0>
 100: 48 00 00 01   bl 100 <copy_oldmem_page+0x100>
        100: R_PPC_REL24 memcpy
 104: 4b ff ff c4   b c8 <copy_oldmem_page+0xc8>
 108: 3b e0 ff f2   li r31,-14
 10c: 4b ff ff bc   b c8 <copy_oldmem_page+0xc8>

v2:

00000000 <copy_oldmem_page>:
   0: 94 21 ff e0   stwu r1,-32(r1)
   4: 93 e1 00 1c   stw r31,28(r1)
   8: 7c bf 2b 79   mr. r31,r5
   c: 93 c1 00 18   stw r30,24(r1)
  10: 3b c0 00 00   li r30,0
  14: 40 82 00 18   bne 2c <copy_oldmem_page+0x2c>
  18: 7f c3 f3 78   mr r3,r30
  1c: 83 e1 00 1c   lwz r31,28(r1)
  20: 83 c1 00 18   lwz r30,24(r1)
  24: 38 21 00 20   addi r1,r1,32
  28: 4e 80 00 20   blr
  2c: 28 1f 10 00   cmplwi r31,4096
  30: 93 61 00 0c   stw r27,12(r1)
  34: 7c 08 02 a6   mflr r0
  38: 93 81 00 10   stw r28,16(r1)
  3c: 93 a1 00 14   stw r29,20(r1)
  40: 7c db 33 78   mr r27,r6
  44: 90 01 00 24   stw r0,36(r1)
  48: 7c 9d 23 78   mr r29,r4
  4c: 7c fc 3b 78   mr r28,r7
  50: 40 81 00 08   ble 58 <copy_oldmem_page+0x58>
  54: 3b e0 10 00   li r31,4096
  58: 54 7e 60 26   rlwinm r30,r3,12,0,19
  5c: 7f c3 f3 78   mr r3,r30
  60: 7f e4 fb 78   mr r4,r31
  64: 48 00 00 01   bl 64 <copy_oldmem_page+0x64>
        64: R_PPC_REL24 memblock_is_region_memory
  68: 2c 03 00 00   cmpwi r3,0
  6c: 41 82 00 54   beq c0 <copy_oldmem_page+0xc0>
  70: 3f de c0 00   addis r30,r30,-16384
  74: 7c 9e da 14   add r4,r30,r27
  78: 7f 86 e3 78   mr r6,r28
  7c: 7f a3 eb 78   mr r3,r29
  80: 7f e5 fb 78   mr r5,r31
  84: 48 00 00 01   bl 84 <copy_oldmem_page+0x84>
        84: R_PPC_REL24 copy_to_user_or_kernel
  88: 3b c0 ff f2   li r30,-14
  8c: 2c 03 00 00   cmpwi r3,0
  90: 40 82 00 08   bne 98 <copy_oldmem_page+0x98>
  94: 7f fe fb 78   mr r30,r31
  98: 80 01 00 24   lwz r0,36(r1)
  9c: 83 61 00 0c   lwz r27,12(r1)
  a0: 83 81 00 10   lwz r28,16(r1)
  a4: 7c 08 03 a6   mtlr r0
  a8: 83 a1 00 14   lwz r29,20(r1)
  ac: 7f c3 f3 78   mr r3,r30
  b0: 83 e1 00 1c   lwz r31,28(r1)
  b4: 83 c1 00 18   lwz r30,24(r1)
  b8: 38 21 00 20   addi r1,r1,32
  bc: 4e 80 00 20   blr
  c0: 7f c3 f3 78   mr r3,r30
  c4: 93 41 00 08   stw r26,8(r1)
  c8: 38 a0 05 91   li r5,1425
  cc: 38 80 10 00   li r4,4096
  d0: 48 00 00 01   bl d0 <copy_oldmem_page+0xd0>
        d0: R_PPC_REL24 ioremap_prot
  d4: 7f 86 e3 78   mr r6,r28
  d8: 7c 83 da 14   add r4,r3,r27
  dc: 7c 7a 1b 78   mr r26,r3
  e0: 7f e5 fb 78   mr r5,r31
  e4: 7f a3 eb 78   mr r3,r29
  e8: 48 00 00 01   bl e8 <copy_oldmem_page+0xe8>
        e8: R_PPC_REL24 copy_to_user_or_kernel
  ec: 3b c0 ff f2   li r30,-14
  f0: 2c 03 00 00   cmpwi r3,0
  f4: 40 82 00 08   bne fc <copy_oldmem_page+0xfc>
  f8: 7f fe fb 78   mr r30,r31
  fc: 7f 43 d3 78   mr r3,r26
 100: 48 00 00 01   bl 100 <copy_oldmem_page+0x100>
        100: R_PPC_REL24 iounmap
 104: 80 01 00 24   lwz r0,36(r1)
 108: 83 41 00 08   lwz r26,8(r1)
 10c: 83 61 00 0c   lwz r27,12(r1)
 110: 7c 08 03 a6   mtlr r0
 114: 83 81 00 10   lwz r28,16(r1)
 118: 83 a1 00 14   lwz r29,20(r1)
 11c: 4b ff ff 90   b ac <copy_oldmem_page+0xac>

Christophe
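To see at the C level what the two disassemblies above correspond to, here is a simplified, hypothetical caller built on the shared helper. It is not the powerpc copy_oldmem_page() that was disassembled: kmap_local_pfn()/kunmap_local() stand in for the arch-specific mapping of the old kernel's page, and the ioremap fallback and other details are omitted.

#include <linux/crash_dump.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Hypothetical, simplified copy_oldmem_page() using the shared helper. */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
                         unsigned long offset, int userbuf)
{
        void *vaddr;
        int ret;

        if (!csize)
                return 0;

        /* Stand-in for the arch-specific mapping of the old kernel's page. */
        vaddr = kmap_local_pfn(pfn);

        /* One call replaces the former copy_to_user()/memcpy() branches. */
        ret = copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf);

        kunmap_local(vaddr);

        return ret ? ret : csize;
}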
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 509f851..c5976a8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
 }
 
-/*
- * Copy to either kernel or user space
- */
-static int copy_to(void *target, void *src, size_t size, int userbuf)
-{
-	if (userbuf) {
-		if (copy_to_user((char __user *) target, src, size))
-			return -EFAULT;
-	} else {
-		memcpy(target, src, size);
-	}
-	return 0;
-}
-
 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
 {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac03940..4a6c3e4 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
+/*
+ * Copy to either kernel or user space
+ */
+static inline int copy_to(void *target, void *src, size_t size, int userbuf)
+{
+	if (userbuf) {
+		if (copy_to_user((char __user *) target, src, size))
+			return -EFAULT;
+	} else {
+		memcpy(target, src, size);
+	}
+	return 0;
+}
+
 #ifndef copy_mc_to_kernel
 /*
  * Without arch opt-in this generic copy_mc_to_kernel() will not handle
In arch/*/kernel/crash_dump*.c, there exist similar code about
copy_oldmem_page(), move copy_to() from vmcore.c to uaccess.h,
and then we can use copy_to() to simplify the related code.

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 fs/proc/vmcore.c        | 14 --------------
 include/linux/uaccess.h | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 14 deletions(-)