| Message ID | 20220216131332.1489939-6-arnd@kernel.org (mailing list archive) |
|---|---|
| State | Handled Elsewhere |
| Series | clean up asm/uaccess.h, kill set_fs for good |
On Wed, Feb 16, 2022 at 02:13:19PM +0100, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@arndb.de>
>
> The __range_not_ok() helper is an x86 (and sparc64) specific interface
> that does roughly the same thing as __access_ok(), but with different
> calling conventions.
>
> Change this to use the normal interface in order for consistency as we
> clean up all access_ok() implementations.
>
> This changes the limit from TASK_SIZE to TASK_SIZE_MAX, which Al points
> out is the right thing to do here anyway.
>
> The callers have to use __access_ok() instead of the normal access_ok()
> though, because on x86 that contains a WARN_ON_IN_IRQ() check that cannot
> be used inside of NMI context while tracing.
>
> Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
> Suggested-by: Christoph Hellwig <hch@infradead.org>
> Link: https://lore.kernel.org/lkml/YgsUKcXGR7r4nINj@zeniv-ca.linux.org.uk/
> Signed-off-by: Arnd Bergmann <arnd@arndb.de>
> ---
>  arch/x86/events/core.c         |  2 +-
>  arch/x86/include/asm/uaccess.h | 10 ++++++----
>  arch/x86/kernel/dumpstack.c    |  2 +-
>  arch/x86/kernel/stacktrace.c   |  2 +-
>  arch/x86/lib/usercopy.c        |  2 +-
>  5 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index e686c5e0537b..eef816fc216d 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -2794,7 +2794,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
>  static inline int
>  valid_user_frame(const void __user *fp, unsigned long size)
>  {
> -        return (__range_not_ok(fp, size, TASK_SIZE) == 0);
> +        return __access_ok(fp, size);
>  }

valid_user_frame just needs to go away and the following __get_user calls
replaced with normal get_user ones.

> diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
> index 53de044e5654..da534fb7b5c6 100644
> --- a/arch/x86/kernel/dumpstack.c
> +++ b/arch/x86/kernel/dumpstack.c
> @@ -85,7 +85,7 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
>           * Make sure userspace isn't trying to trick us into dumping kernel
>           * memory by pointing the userspace instruction pointer at it.
>           */
> -        if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
> +        if (!__access_ok((void __user *)src, nbytes))
>                  return -EINVAL;

This one is not needed at all as copy_from_user_nmi already checks the
access range.

> diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
> index 15b058eefc4e..ee117fcf46ed 100644
> --- a/arch/x86/kernel/stacktrace.c
> +++ b/arch/x86/kernel/stacktrace.c
> @@ -90,7 +90,7 @@ copy_stack_frame(const struct stack_frame_user __user *fp,
>  {
>          int ret;
>
> -        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
> +        if (!__access_ok(fp, sizeof(*frame)))
>                  return 0;

Just switch the __get_user calls below to get_user instead.
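For context on the middle comment above: the redundancy is visible in the arch/x86/lib/usercopy.c hunk at the bottom of this page, where copy_from_user_nmi() performs the same range check itself and bails out before copying anything. A rough sketch of that relationship, simplified rather than quoted from the tree:

```c
/* Simplified sketch: the callee already rejects bad user ranges,
 * so a caller-side __access_ok() check only duplicates work. */
unsigned long copy_from_user_nmi(void *to, const void __user *from,
                                 unsigned long n)
{
        if (!__access_ok(from, n))      /* range check happens here */
                return n;               /* nothing copied */

        /* ... NMI-safe copy ... */
        return 0;                       /* 0 bytes left uncopied */
}
```

A caller such as copy_code() therefore gains nothing from doing its own __access_ok(src, nbytes) check before calling copy_from_user_nmi().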
On Fri, Feb 18, 2022 at 7:28 AM Christoph Hellwig <hch@lst.de> wrote:
> On Wed, Feb 16, 2022 at 02:13:19PM +0100, Arnd Bergmann wrote:
> > --- a/arch/x86/events/core.c
> > +++ b/arch/x86/events/core.c
> > @@ -2794,7 +2794,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
> >  static inline int
> >  valid_user_frame(const void __user *fp, unsigned long size)
> >  {
> > -        return (__range_not_ok(fp, size, TASK_SIZE) == 0);
> > +        return __access_ok(fp, size);
> >  }
>
> valid_user_frame just needs to go away and the following __get_user calls
> replaced with normal get_user ones.

As I understand it, that would not work here because get_user() calls
access_ok() rather than __access_ok(), and on x86 that cannot be called in
NMI context. It is a bit odd that x86 is the only architecture that has
this check, but adding it was clearly intentional, see commit 7c4788950ba5
("x86/uaccess, sched/preempt: Verify access_ok() context").

> > diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
> > index 53de044e5654..da534fb7b5c6 100644
> > --- a/arch/x86/kernel/dumpstack.c
> > +++ b/arch/x86/kernel/dumpstack.c
> > @@ -85,7 +85,7 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
> >           * Make sure userspace isn't trying to trick us into dumping kernel
> >           * memory by pointing the userspace instruction pointer at it.
> >           */
> > -        if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
> > +        if (!__access_ok((void __user *)src, nbytes))
> >                  return -EINVAL;
>
> This one is not needed at all as copy_from_user_nmi already checks the
> access range.

Ok, removing this.

> > diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
> > index 15b058eefc4e..ee117fcf46ed 100644
> > --- a/arch/x86/kernel/stacktrace.c
> > +++ b/arch/x86/kernel/stacktrace.c
> > @@ -90,7 +90,7 @@ copy_stack_frame(const struct stack_frame_user __user *fp,
> >  {
> >          int ret;
> >
> > -        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
> > +        if (!__access_ok(fp, sizeof(*frame)))
> >                  return 0;
>
> Just switch the __get_user calls below to get_user instead.

Same as the first one, I think we can't do this in NMI context.

        Arnd
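To make the NMI constraint concrete, here is a condensed sketch of the pattern these callers keep, loosely following the copy_stack_frame() call site touched by this series. The body and the struct field names are paraphrased for illustration rather than quoted from the tree:

```c
/* Illustrative sketch of an NMI-context user stack walker.
 * get_user() goes through access_ok(), which contains WARN_ON_IN_IRQ()
 * on x86, so this path pairs the bare range check with __get_user(). */
static int copy_stack_frame(const struct stack_frame_user __user *fp,
                            struct stack_frame_user *frame)
{
        int ret = 1;

        if (!__access_ok(fp, sizeof(*frame)))   /* no WARN_ON_IN_IRQ() */
                return 0;

        pagefault_disable();                    /* must not fault from NMI */
        if (__get_user(frame->next_fp, &fp->next_fp) ||
            __get_user(frame->ret_addr, &fp->ret_addr))
                ret = 0;
        pagefault_enable();

        return ret;
}
```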
From: Christoph Hellwig
> Sent: 18 February 2022 06:29
...
> > diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
> > index 15b058eefc4e..ee117fcf46ed 100644
> > --- a/arch/x86/kernel/stacktrace.c
> > +++ b/arch/x86/kernel/stacktrace.c
> > @@ -90,7 +90,7 @@ copy_stack_frame(const struct stack_frame_user __user *fp,
> >  {
> >          int ret;
> >
> > -        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
> > +        if (!__access_ok(fp, sizeof(*frame)))
> >                  return 0;
>
> Just switch the __get_user calls below to get_user instead.

Is this worth doing at all?
How much userspace code is actually compiled with stack frames?

Won't work well for a 32bit process on a 64bit kernel either.

        David
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index e686c5e0537b..eef816fc216d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2794,7 +2794,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 static inline int
 valid_user_frame(const void __user *fp, unsigned long size)
 {
-        return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+        return __access_ok(fp, size);
 }
 
 static unsigned long get_segment_base(unsigned int segment)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ac96f9b2d64b..79c4869ccdd6 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -16,8 +16,10 @@
  * Test whether a block of memory is a valid user space address.
  * Returns 0 if the range is valid, nonzero otherwise.
  */
-static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size)
 {
+        unsigned long limit = TASK_SIZE_MAX;
+
         /*
          * If we have used "sizeof()" for the size,
          * we know it won't overflow the limit (but
@@ -35,10 +37,10 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
         return unlikely(addr > limit);
 }
 
-#define __range_not_ok(addr, size, limit)                               \
+#define __access_ok(addr, size)                                         \
 ({                                                                      \
         __chk_user_ptr(addr);                                           \
-        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
+        !__chk_range_not_ok((unsigned long __force)(addr), size);       \
 })
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -69,7 +71,7 @@ static inline bool pagefault_disabled(void);
 #define access_ok(addr, size)                                   \
 ({                                                              \
         WARN_ON_IN_IRQ();                                       \
-        likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));     \
+        likely(__access_ok(addr, size));                        \
 })
 
 extern int __get_user_1(void);
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 53de044e5654..da534fb7b5c6 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -85,7 +85,7 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
          * Make sure userspace isn't trying to trick us into dumping kernel
          * memory by pointing the userspace instruction pointer at it.
          */
-        if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+        if (!__access_ok((void __user *)src, nbytes))
                 return -EINVAL;
 
         /*
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 15b058eefc4e..ee117fcf46ed 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -90,7 +90,7 @@ copy_stack_frame(const struct stack_frame_user __user *fp,
 {
         int ret;
 
-        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
+        if (!__access_ok(fp, sizeof(*frame)))
                 return 0;
 
         ret = 1;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index c3e8a62ca561..ad0139d25401 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -32,7 +32,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
         unsigned long ret;
 
-        if (__range_not_ok(from, n, TASK_SIZE))
+        if (!__access_ok(from, n))
                 return n;
 
         if (!nmi_uaccess_okay())
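A side note on the range check itself: __chk_range_not_ok() is written to subtract the size from the limit (and, for non-constant sizes, to detect wraparound explicitly) because the naive addr + size comparison can wrap around for hostile values and falsely pass. The stand-alone user-space model below contrasts the two forms; the limit constant is made up for illustration and is not the kernel's TASK_SIZE_MAX, and the model shows the general pattern rather than the header's exact fast and slow paths:

```c
/* User-space model of an overflow-safe user range check. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_LIMIT 0x7ffffffff000UL     /* illustrative, not TASK_SIZE_MAX */

/* Naive form: addr + size can wrap past zero and falsely pass. */
static bool naive_range_ok(unsigned long addr, unsigned long size)
{
        return addr + size <= FAKE_LIMIT;
}

/* Subtract-then-compare form: no wraparound possible. */
static bool safe_range_ok(unsigned long addr, unsigned long size)
{
        unsigned long limit = FAKE_LIMIT;

        if (size > limit)
                return false;
        return addr <= limit - size;
}

int main(void)
{
        unsigned long addr = 0xffffffffffffff00UL;      /* kernel-space address */
        unsigned long size = 0x200;                     /* addr + size wraps */

        /* Prints "naive: 1  safe: 0": only the safe form rejects it. */
        printf("naive: %d  safe: %d\n",
               naive_range_ok(addr, size), safe_range_ok(addr, size));
        return 0;
}
```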