
[v11,3/8] powerpc/perf: consolidate read_user_stack_32

Message ID 184347595442b4ca664613008a09e8cea7188c36.1584620202.git.msuchanek@suse.de (mailing list archive)
State New, archived
Series Disable compat cruft on ppc64le v11

Commit Message

Michal Suchanek March 19, 2020, 12:19 p.m. UTC
There are two almost identical copies for 32bit and 64bit.

The function is used only in 32bit code, which will be split out in the next
patch, so consolidate it into one function.

Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
v6:  new patch
v8:  move the consolidated function out of the ifdef block.
v11: rebase on top of def0bfdbd603
---
 arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
 1 file changed, 24 insertions(+), 24 deletions(-)
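
For reference, the consolidated helper ends up looking like this (taken from
the diff at the bottom of this page). Only the 64-bit build has a real
read_user_stack_slow() fallback, so on 32-bit the IS_ENABLED() branch
compiles away:

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	rc = probe_user_read(ret, ptr, sizeof(*ret));

	if (IS_ENABLED(CONFIG_PPC64) && rc)
		return read_user_stack_slow(ptr, ret, 4);

	return rc;
}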

Comments

Nicholas Piggin March 24, 2020, 8:48 a.m. UTC | #1
Michal Suchanek's on March 19, 2020 10:19 pm:
> There are two almost identical copies for 32bit and 64bit.
> 
> The function is used only in 32bit code which will be split out in next
> patch so consolidate to one function.
> 
> Signed-off-by: Michal Suchanek <msuchanek@suse.de>
> Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
> ---
> v6:  new patch
> v8:  move the consolidated function out of the ifdef block.
> v11: rebase on top of def0bfdbd603
> ---
>  arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
>  1 file changed, 24 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> index cbc251981209..c9a78c6e4361 100644
> --- a/arch/powerpc/perf/callchain.c
> +++ b/arch/powerpc/perf/callchain.c
> @@ -161,18 +161,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
>  	return read_user_stack_slow(ptr, ret, 8);
>  }
>  
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> -{
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> -		return -EFAULT;
> -
> -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> -		return 0;
> -
> -	return read_user_stack_slow(ptr, ret, 4);
> -}
> -
>  static inline int valid_user_sp(unsigned long sp, int is_64)
>  {
>  	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
> @@ -277,19 +265,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
>  }
>  
>  #else  /* CONFIG_PPC64 */
> -/*
> - * On 32-bit we just access the address and let hash_page create a
> - * HPTE if necessary, so there is no need to fall back to reading
> - * the page tables.  Since this is called at interrupt level,
> - * do_page_fault() won't treat a DSI as a page fault.
> - */
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> -		return -EFAULT;
> -
> -	return probe_user_read(ret, ptr, sizeof(*ret));
> +	return 0;
>  }
>  
>  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> @@ -312,6 +290,28 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
>  
>  #endif /* CONFIG_PPC64 */
>  
> +/*
> + * On 32-bit we just access the address and let hash_page create a
> + * HPTE if necessary, so there is no need to fall back to reading
> + * the page tables.  Since this is called at interrupt level,
> + * do_page_fault() won't treat a DSI as a page fault.
> + */

The comment is actually probably better to stay in the 32-bit
read_user_stack_slow implementation. Is that function defined
on 32-bit purely so that you can use IS_ENABLED()? In that case
I would prefer to put a BUG() there which makes it self documenting.
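
E.g. something like this (untested sketch, just to illustrate the idea):

static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	/* Never reached on 32-bit: the caller only takes the slow
	 * path when IS_ENABLED(CONFIG_PPC64). */
	BUG();
	return 0;
}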

Thanks,
Nick

> +static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +{
> +	int rc;
> +
> +	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> +	    ((unsigned long)ptr & 3))
> +		return -EFAULT;
> +
> +	rc = probe_user_read(ret, ptr, sizeof(*ret));
> +
> +	if (IS_ENABLED(CONFIG_PPC64) && rc)
> +		return read_user_stack_slow(ptr, ret, 4);
> +
> +	return rc;
> +}
> +
>  /*
>   * Layout for non-RT signal frames
>   */
> -- 
> 2.23.0
> 
>
Michal Suchanek March 24, 2020, 7:38 p.m. UTC | #2
On Tue, Mar 24, 2020 at 06:48:20PM +1000, Nicholas Piggin wrote:
> Michal Suchanek's on March 19, 2020 10:19 pm:
> > There are two almost identical copies for 32bit and 64bit.
> > 
> > The function is used only in 32bit code which will be split out in next
> > patch so consolidate to one function.
> > 
> > Signed-off-by: Michal Suchanek <msuchanek@suse.de>
> > Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
> > ---
> > v6:  new patch
> > v8:  move the consolidated function out of the ifdef block.
> > v11: rebase on top of def0bfdbd603
> > ---
> >  arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
> >  1 file changed, 24 insertions(+), 24 deletions(-)
> > 
> > diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> > index cbc251981209..c9a78c6e4361 100644
> > --- a/arch/powerpc/perf/callchain.c
> > +++ b/arch/powerpc/perf/callchain.c
> > @@ -161,18 +161,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
> >  	return read_user_stack_slow(ptr, ret, 8);
> >  }
> >  
> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> > -{
> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> > -	    ((unsigned long)ptr & 3))
> > -		return -EFAULT;
> > -
> > -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> > -		return 0;
> > -
> > -	return read_user_stack_slow(ptr, ret, 4);
> > -}
> > -
> >  static inline int valid_user_sp(unsigned long sp, int is_64)
> >  {
> >  	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
> > @@ -277,19 +265,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> >  }
> >  
> >  #else  /* CONFIG_PPC64 */
> > -/*
> > - * On 32-bit we just access the address and let hash_page create a
> > - * HPTE if necessary, so there is no need to fall back to reading
> > - * the page tables.  Since this is called at interrupt level,
> > - * do_page_fault() won't treat a DSI as a page fault.
> > - */
> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> > +static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
> >  {
> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> > -	    ((unsigned long)ptr & 3))
> > -		return -EFAULT;
> > -
> > -	return probe_user_read(ret, ptr, sizeof(*ret));
> > +	return 0;
> >  }
> >  
> >  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> > @@ -312,6 +290,28 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
> >  
> >  #endif /* CONFIG_PPC64 */
> >  
> > +/*
> > + * On 32-bit we just access the address and let hash_page create a
> > + * HPTE if necessary, so there is no need to fall back to reading
> > + * the page tables.  Since this is called at interrupt level,
> > + * do_page_fault() won't treat a DSI as a page fault.
> > + */
> 
> The comment is actually probably better to stay in the 32-bit
> read_user_stack_slow implementation. Is that function defined
> on 32-bit purely so that you can use IS_ENABLED()? In that case
It documents the IS_ENABLED() and that's where it is. The 32bit
definition is only a technical detail.
> I would prefer to put a BUG() there which makes it self documenting.
Which will cause checkpatch complaints about introducing a new BUG(), which
is frowned upon.

Thanks

Michal
Nicholas Piggin April 3, 2020, 7:13 a.m. UTC | #3
Michal Suchánek's on March 25, 2020 5:38 am:
> On Tue, Mar 24, 2020 at 06:48:20PM +1000, Nicholas Piggin wrote:
>> Michal Suchanek's on March 19, 2020 10:19 pm:
>> > There are two almost identical copies for 32bit and 64bit.
>> > 
>> > The function is used only in 32bit code which will be split out in next
>> > patch so consolidate to one function.
>> > 
>> > Signed-off-by: Michal Suchanek <msuchanek@suse.de>
>> > Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
>> > ---
>> > v6:  new patch
>> > v8:  move the consolidated function out of the ifdef block.
>> > v11: rebase on top of def0bfdbd603
>> > ---
>> >  arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
>> >  1 file changed, 24 insertions(+), 24 deletions(-)
>> > 
>> > diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
>> > index cbc251981209..c9a78c6e4361 100644
>> > --- a/arch/powerpc/perf/callchain.c
>> > +++ b/arch/powerpc/perf/callchain.c
>> > @@ -161,18 +161,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
>> >  	return read_user_stack_slow(ptr, ret, 8);
>> >  }
>> >  
>> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
>> > -{
>> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
>> > -	    ((unsigned long)ptr & 3))
>> > -		return -EFAULT;
>> > -
>> > -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
>> > -		return 0;
>> > -
>> > -	return read_user_stack_slow(ptr, ret, 4);
>> > -}
>> > -
>> >  static inline int valid_user_sp(unsigned long sp, int is_64)
>> >  {
>> >  	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
>> > @@ -277,19 +265,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
>> >  }
>> >  
>> >  #else  /* CONFIG_PPC64 */
>> > -/*
>> > - * On 32-bit we just access the address and let hash_page create a
>> > - * HPTE if necessary, so there is no need to fall back to reading
>> > - * the page tables.  Since this is called at interrupt level,
>> > - * do_page_fault() won't treat a DSI as a page fault.
>> > - */
>> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
>> > +static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
>> >  {
>> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
>> > -	    ((unsigned long)ptr & 3))
>> > -		return -EFAULT;
>> > -
>> > -	return probe_user_read(ret, ptr, sizeof(*ret));
>> > +	return 0;
>> >  }
>> >  
>> >  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
>> > @@ -312,6 +290,28 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
>> >  
>> >  #endif /* CONFIG_PPC64 */
>> >  
>> > +/*
>> > + * On 32-bit we just access the address and let hash_page create a
>> > + * HPTE if necessary, so there is no need to fall back to reading
>> > + * the page tables.  Since this is called at interrupt level,
>> > + * do_page_fault() won't treat a DSI as a page fault.
>> > + */
>> 
>> The comment is actually probably better to stay in the 32-bit
>> read_user_stack_slow implementation. Is that function defined
>> on 32-bit purely so that you can use IS_ENABLED()? In that case
> It documents the IS_ENABLED() and that's where it is. The 32bit
> definition is only a technical detail.

Sorry for the late reply, busy trying to fix bugs in the C rewrite
series. I don't think it is the right place, it should be in the
ppc32 implementation detail. ppc64 has an equivalent comment at the
top of its read_user_stack functions.

>> I would prefer to put a BUG() there which makes it self documenting.
> Which will cause checkpatch complaints about introducing new BUG() which
> is frowned on.

It's fine in this case, that warning is about not introducing
runtime bugs, but this wouldn't be.

But... I actually don't like adding read_user_stack_slow on 32-bit
and especially not just to make IS_ENABLED work.

IMO this would be better if you really want to consolidate it

---

diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index cbc251981209..ca3a599b3f54 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -108,7 +108,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
  * interrupt context, so if the access faults, we read the page tables
  * to find which page (if any) is mapped and access it directly.
  */
-static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
+static int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
 {
 	int ret = -EFAULT;
 	pgd_t *pgdir;
@@ -149,28 +149,21 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
 	return ret;
 }
 
-static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
+static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
 {
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
-	    ((unsigned long)ptr & 7))
+	if ((unsigned long)ptr > TASK_SIZE - size ||
+	    ((unsigned long)ptr & (size - 1)))
 		return -EFAULT;
 
-	if (!probe_user_read(ret, ptr, sizeof(*ret)))
+	if (!probe_user_read(ret, ptr, size))
 		return 0;
 
-	return read_user_stack_slow(ptr, ret, 8);
+	return read_user_stack_slow(ptr, ret, size);
 }
 
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 {
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
-	    ((unsigned long)ptr & 3))
-		return -EFAULT;
-
-	if (!probe_user_read(ret, ptr, sizeof(*ret)))
-		return 0;
-
-	return read_user_stack_slow(ptr, ret, 4);
+	return __read_user_stack(ptr, ret, sizeof(*ret));
 }
 
 static inline int valid_user_sp(unsigned long sp, int is_64)
@@ -283,13 +276,13 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
  * the page tables.  Since this is called at interrupt level,
  * do_page_fault() won't treat a DSI as a page fault.
  */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
 {
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
-	    ((unsigned long)ptr & 3))
+	if ((unsigned long)ptr > TASK_SIZE - size ||
+	    ((unsigned long)ptr & (size - 1)))
 		return -EFAULT;
 
-	return probe_user_read(ret, ptr, sizeof(*ret));
+	return probe_user_read(ret, ptr, size);
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
@@ -312,6 +305,11 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
 
 #endif /* CONFIG_PPC64 */
 
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+	return __read_user_stack(ptr, ret, sizeof(*ret));
+}
+
 /*
  * Layout for non-RT signal frames
  */
Michal Suchanek April 3, 2020, 10:52 a.m. UTC | #4
Hello,

there are 3 variants of the function

read_user_stack_64

32bit read_user_stack_32
64bit read_user_stack_32
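
(In the current code, read_user_stack_64 and the 64bit read_user_stack_32
fall back to read_user_stack_slow() when probe_user_read() fails, while the
32bit read_user_stack_32 has no slow-path fallback.)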

On Fri, Apr 03, 2020 at 05:13:25PM +1000, Nicholas Piggin wrote:
> Michal Suchánek's on March 25, 2020 5:38 am:
> > On Tue, Mar 24, 2020 at 06:48:20PM +1000, Nicholas Piggin wrote:
> >> Michal Suchanek's on March 19, 2020 10:19 pm:
> >> > There are two almost identical copies for 32bit and 64bit.
> >> > 
> >> > The function is used only in 32bit code which will be split out in next
> >> > patch so consolidate to one function.
> >> > 
> >> > Signed-off-by: Michal Suchanek <msuchanek@suse.de>
> >> > Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
> >> > ---
> >> > v6:  new patch
> >> > v8:  move the consolidated function out of the ifdef block.
> >> > v11: rebase on top of def0bfdbd603
> >> > ---
> >> >  arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
> >> >  1 file changed, 24 insertions(+), 24 deletions(-)
> >> > 
> >> > diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> >> > index cbc251981209..c9a78c6e4361 100644
> >> > --- a/arch/powerpc/perf/callchain.c
> >> > +++ b/arch/powerpc/perf/callchain.c
> >> > @@ -161,18 +161,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
> >> >  	return read_user_stack_slow(ptr, ret, 8);
> >> >  }
> >> >  
> >> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> >> > -{
> >> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> >> > -	    ((unsigned long)ptr & 3))
> >> > -		return -EFAULT;
> >> > -
> >> > -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> >> > -		return 0;
> >> > -
> >> > -	return read_user_stack_slow(ptr, ret, 4);
> >> > -}
> >> > -
> >> >  static inline int valid_user_sp(unsigned long sp, int is_64)
> >> >  {
> >> >  	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
> >> > @@ -277,19 +265,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> >> >  }
> >> >  
> >> >  #else  /* CONFIG_PPC64 */
> >> > -/*
> >> > - * On 32-bit we just access the address and let hash_page create a
> >> > - * HPTE if necessary, so there is no need to fall back to reading
> >> > - * the page tables.  Since this is called at interrupt level,
> >> > - * do_page_fault() won't treat a DSI as a page fault.
> >> > - */
> >> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> >> > +static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
> >> >  {
> >> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> >> > -	    ((unsigned long)ptr & 3))
> >> > -		return -EFAULT;
> >> > -
> >> > -	return probe_user_read(ret, ptr, sizeof(*ret));
> >> > +	return 0;
> >> >  }
> >> >  
> >> >  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> >> > @@ -312,6 +290,28 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
> >> >  
> >> >  #endif /* CONFIG_PPC64 */
> >> >  
> >> > +/*
> >> > + * On 32-bit we just access the address and let hash_page create a
> >> > + * HPTE if necessary, so there is no need to fall back to reading
> >> > + * the page tables.  Since this is called at interrupt level,
> >> > + * do_page_fault() won't treat a DSI as a page fault.
> >> > + */
> >> 
> >> The comment is actually probably better to stay in the 32-bit
> >> read_user_stack_slow implementation. Is that function defined
> >> on 32-bit purely so that you can use IS_ENABLED()? In that case
> > It documents the IS_ENABLED() and that's where it is. The 32bit
> > definition is only a technical detail.
> 
> Sorry for the late reply, busy trying to fix bugs in the C rewrite
> series. I don't think it is the right place, it should be in the
> ppc32 implementation detail. ppc64 has an equivalent comment at the
> top of its read_user_stack functions.
> 
> >> I would prefer to put a BUG() there which makes it self documenting.
> > Which will cause checkpatch complaints about introducing new BUG() which
> > is frowned on.
> 
> It's fine in this case, that warning is about not introducing
> runtime bugs, but this wouldn't be.
> 
> But... I actually don't like adding read_user_stack_slow on 32-bit
> and especially not just to make IS_ENABLED work.
> 
> IMO this would be better if you really want to consolidate it
> 
> ---
> 
> diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> index cbc251981209..ca3a599b3f54 100644
> --- a/arch/powerpc/perf/callchain.c
> +++ b/arch/powerpc/perf/callchain.c
> @@ -108,7 +108,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
>   * interrupt context, so if the access faults, we read the page tables
>   * to find which page (if any) is mapped and access it directly.
>   */
> -static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
> +static int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
>  {
>  	int ret = -EFAULT;
>  	pgd_t *pgdir;
> @@ -149,28 +149,21 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
>  	return ret;
>  }
>  
> -static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
> +static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
> -	    ((unsigned long)ptr & 7))
> +	if ((unsigned long)ptr > TASK_SIZE - size ||
> +	    ((unsigned long)ptr & (size - 1)))
>  		return -EFAULT;
>  
> -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> +	if (!probe_user_read(ret, ptr, size))
>  		return 0;
>  
> -	return read_user_stack_slow(ptr, ret, 8);
> +	return read_user_stack_slow(ptr, ret, size);
>  }
>  
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> -		return -EFAULT;
> -
> -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> -		return 0;
> -
> -	return read_user_stack_slow(ptr, ret, 4);
> +	return __read_user_stack(ptr, ret, sizeof(*ret));
>  }
>  
>  static inline int valid_user_sp(unsigned long sp, int is_64)
> @@ -283,13 +276,13 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
>   * the page tables.  Since this is called at interrupt level,
>   * do_page_fault() won't treat a DSI as a page fault.
>   */
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> +	if ((unsigned long)ptr > TASK_SIZE - size ||
> +	    ((unsigned long)ptr & (size - 1)))
>  		return -EFAULT;
>  
> -	return probe_user_read(ret, ptr, sizeof(*ret));
> +	return probe_user_read(ret, ptr, size);
>  }
>  
>  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> @@ -312,6 +305,11 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
>  
>  #endif /* CONFIG_PPC64 */
>  
> +static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +{
> +	return __read_user_stack(ptr, ret, sizeof(*ret));
Does not work for 64bit read_user_stack_32 ^ this should be 4.

Other than that it should preserve the existing logic just fine.

Thanks

Michal
Nicholas Piggin April 3, 2020, 11:26 a.m. UTC | #5
Michal Suchánek's on April 3, 2020 8:52 pm:
> Hello,
> 
> there are 3 variants of the function
> 
> read_user_stack_64
> 
> 32bit read_user_stack_32
> 64bit read_user_stack_32

Right.

> On Fri, Apr 03, 2020 at 05:13:25PM +1000, Nicholas Piggin wrote:
[...]
>>  #endif /* CONFIG_PPC64 */
>>  
>> +static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
>> +{
>> +	return __read_user_stack(ptr, ret, sizeof(*ret));
> Does not work for 64bit read_user_stack_32 ^ this should be 4.
> 
> Other than that it should preserve the existing logic just fine.

sizeof(int) == 4 on 64bit so it should work.

Thanks,
Nick
Michal Suchanek April 3, 2020, 11:51 a.m. UTC | #6
On Fri, Apr 03, 2020 at 09:26:27PM +1000, Nicholas Piggin wrote:
> Michal Suchánek's on April 3, 2020 8:52 pm:
> > Hello,
> > 
> > there are 3 variants of the function
> > 
> > read_user_stack_64
> > 
> > 32bit read_user_stack_32
> > 64bit read_user_stack_32
> 
> Right.
> 
> > On Fri, Apr 03, 2020 at 05:13:25PM +1000, Nicholas Piggin wrote:
> [...]
> >>  #endif /* CONFIG_PPC64 */
> >>  
> >> +static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> >> +{
> >> +	return __read_user_stack(ptr, ret, sizeof(*ret));
> > Does not work for 64bit read_user_stack_32 ^ this should be 4.
> > 
> > Other than that it should preserve the existing logic just fine.
> 
> sizeof(int) == 4 on 64bit so it should work.
> 
Right, the type is different for the 32bit and 64bit versions.

Thanks

Michal
Michal Suchanek April 6, 2020, 8:52 p.m. UTC | #7
On Fri, Apr 03, 2020 at 05:13:25PM +1000, Nicholas Piggin wrote:
> Michal Suchánek's on March 25, 2020 5:38 am:
> > On Tue, Mar 24, 2020 at 06:48:20PM +1000, Nicholas Piggin wrote:
> >> Michal Suchanek's on March 19, 2020 10:19 pm:
> >> > There are two almost identical copies for 32bit and 64bit.
> >> > 
> >> > The function is used only in 32bit code which will be split out in next
> >> > patch so consolidate to one function.
> >> > 
> >> > Signed-off-by: Michal Suchanek <msuchanek@suse.de>
> >> > Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
> >> > ---
> >> > v6:  new patch
> >> > v8:  move the consolidated function out of the ifdef block.
> >> > v11: rebase on top of def0bfdbd603
> >> > ---
> >> >  arch/powerpc/perf/callchain.c | 48 +++++++++++++++++------------------
> >> >  1 file changed, 24 insertions(+), 24 deletions(-)
> >> > 
> >> > diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> >> > index cbc251981209..c9a78c6e4361 100644
> >> > --- a/arch/powerpc/perf/callchain.c
> >> > +++ b/arch/powerpc/perf/callchain.c
> >> > @@ -161,18 +161,6 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
> >> >  	return read_user_stack_slow(ptr, ret, 8);
> >> >  }
> >> >  
> >> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> >> > -{
> >> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> >> > -	    ((unsigned long)ptr & 3))
> >> > -		return -EFAULT;
> >> > -
> >> > -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> >> > -		return 0;
> >> > -
> >> > -	return read_user_stack_slow(ptr, ret, 4);
> >> > -}
> >> > -
> >> >  static inline int valid_user_sp(unsigned long sp, int is_64)
> >> >  {
> >> >  	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
> >> > @@ -277,19 +265,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> >> >  }
> >> >  
> >> >  #else  /* CONFIG_PPC64 */
> >> > -/*
> >> > - * On 32-bit we just access the address and let hash_page create a
> >> > - * HPTE if necessary, so there is no need to fall back to reading
> >> > - * the page tables.  Since this is called at interrupt level,
> >> > - * do_page_fault() won't treat a DSI as a page fault.
> >> > - */
> >> > -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> >> > +static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
> >> >  {
> >> > -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> >> > -	    ((unsigned long)ptr & 3))
> >> > -		return -EFAULT;
> >> > -
> >> > -	return probe_user_read(ret, ptr, sizeof(*ret));
> >> > +	return 0;
> >> >  }
> >> >  
> >> >  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> >> > @@ -312,6 +290,28 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
> >> >  
> >> >  #endif /* CONFIG_PPC64 */
> >> >  
> >> > +/*
> >> > + * On 32-bit we just access the address and let hash_page create a
> >> > + * HPTE if necessary, so there is no need to fall back to reading
> >> > + * the page tables.  Since this is called at interrupt level,
> >> > + * do_page_fault() won't treat a DSI as a page fault.
> >> > + */
> >> 
> >> The comment is actually probably better to stay in the 32-bit
> >> read_user_stack_slow implementation. Is that function defined
> >> on 32-bit purely so that you can use IS_ENABLED()? In that case
> > It documents the IS_ENABLED() and that's where it is. The 32bit
> > definition is only a technical detail.
> 
> Sorry for the late reply, busy trying to fix bugs in the C rewrite
> series. I don't think it is the right place, it should be in the
> ppc32 implementation detail.
Which does not exist anymore after the 32bit and 64bit parts are split.
> ppc64 has an equivalent comment at the top of its read_user_stack functions.
> 
> >> I would prefer to put a BUG() there which makes it self documenting.
> > Which will cause checkpatch complaints about introducing new BUG() which
> > is frowned on.
> 
> It's fine in this case, that warning is about not introducing
> runtime bugs, but this wouldn't be.
> 
> But... I actually don't like adding read_user_stack_slow on 32-bit
> and especially not just to make IS_ENABLED work.
That's to not break the build at this point. The function is removed later.
> 
> IMO this would be better if you really want to consolidate it
> 
> ---
> 
> diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
> index cbc251981209..ca3a599b3f54 100644
> --- a/arch/powerpc/perf/callchain.c
> +++ b/arch/powerpc/perf/callchain.c
> @@ -108,7 +108,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
>   * interrupt context, so if the access faults, we read the page tables
>   * to find which page (if any) is mapped and access it directly.
>   */
> -static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
> +static int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
>  {
>  	int ret = -EFAULT;
>  	pgd_t *pgdir;
> @@ -149,28 +149,21 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
>  	return ret;
>  }
>  
> -static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
> +static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
> -	    ((unsigned long)ptr & 7))
> +	if ((unsigned long)ptr > TASK_SIZE - size ||
> +	    ((unsigned long)ptr & (size - 1)))
>  		return -EFAULT;
>  
> -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> +	if (!probe_user_read(ret, ptr, size))
>  		return 0;
>  
> -	return read_user_stack_slow(ptr, ret, 8);
> +	return read_user_stack_slow(ptr, ret, size);
>  }
>  
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> -		return -EFAULT;
> -
> -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
> -		return 0;
> -
> -	return read_user_stack_slow(ptr, ret, 4);
> +	return __read_user_stack(ptr, ret, sizeof(*ret));
>  }
>  
>  static inline int valid_user_sp(unsigned long sp, int is_64)
> @@ -283,13 +276,13 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
>   * the page tables.  Since this is called at interrupt level,
>   * do_page_fault() won't treat a DSI as a page fault.
>   */
> -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +static int __read_user_stack(const void __user *ptr, void *ret, size_t size)
>  {
> -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
> -	    ((unsigned long)ptr & 3))
> +	if ((unsigned long)ptr > TASK_SIZE - size ||
> +	    ((unsigned long)ptr & (size - 1)))
>  		return -EFAULT;
>  
> -	return probe_user_read(ret, ptr, sizeof(*ret));
> +	return probe_user_read(ret, ptr, size);
>  }
>  
>  static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
> @@ -312,6 +305,11 @@ static inline int valid_user_sp(unsigned long sp, int is_64)
>  
>  #endif /* CONFIG_PPC64 */
>  
> +static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
> +{
> +	return __read_user_stack(ptr, ret, sizeof(*ret));
> +}
> +
>  /*
>   * Layout for non-RT signal frames
>   */

Patch

diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index cbc251981209..c9a78c6e4361 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -161,18 +161,6 @@  static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	return read_user_stack_slow(ptr, ret, 8);
 }
 
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
-{
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
-	    ((unsigned long)ptr & 3))
-		return -EFAULT;
-
-	if (!probe_user_read(ret, ptr, sizeof(*ret)))
-		return 0;
-
-	return read_user_stack_slow(ptr, ret, 4);
-}
-
 static inline int valid_user_sp(unsigned long sp, int is_64)
 {
 	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
@@ -277,19 +265,9 @@  static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 }
 
 #else  /* CONFIG_PPC64 */
-/*
- * On 32-bit we just access the address and let hash_page create a
- * HPTE if necessary, so there is no need to fall back to reading
- * the page tables.  Since this is called at interrupt level,
- * do_page_fault() won't treat a DSI as a page fault.
- */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
 {
-	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
-	    ((unsigned long)ptr & 3))
-		return -EFAULT;
-
-	return probe_user_read(ret, ptr, sizeof(*ret));
+	return 0;
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
@@ -312,6 +290,28 @@  static inline int valid_user_sp(unsigned long sp, int is_64)
 
 #endif /* CONFIG_PPC64 */
 
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables.  Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+	int rc;
+
+	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+	    ((unsigned long)ptr & 3))
+		return -EFAULT;
+
+	rc = probe_user_read(ret, ptr, sizeof(*ret));
+
+	if (IS_ENABLED(CONFIG_PPC64) && rc)
+		return read_user_stack_slow(ptr, ret, 4);
+
+	return rc;
+}
+
 /*
  * Layout for non-RT signal frames
  */
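
Note: the read_user_stack_slow() stub added for the 32-bit build above exists
only so that the IS_ENABLED(CONFIG_PPC64) branch keeps compiling; as discussed
in the thread, it is never called at runtime on 32-bit and is removed again
later in the series once the 32-bit code is split out.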