[net-next,1/2] net/iucv: Avoid explicit cpumask var allocation on stack

Message ID 20240329105610.922675-2-dawei.li@shingroup.cn (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series Avoid explicit cpumask var allocation on stack

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 7 of 7 maintainers
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 63 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-03-31--03-00 (tests: 953)

Commit Message

Dawei Li March 29, 2024, 10:56 a.m. UTC
For a CONFIG_CPUMASK_OFFSTACK=y kernel, explicit allocation of a cpumask
variable on the stack is not recommended since it can cause potential stack
overflow.

Instead, kernel code should always use the *cpumask_var API(s) to allocate
cpumask variables in a config-neutral way, leaving the allocation strategy
to CONFIG_CPUMASK_OFFSTACK.

Use the *cpumask_var API(s) to address this.

Signed-off-by: Dawei Li <dawei.li@shingroup.cn>
---
 net/iucv/iucv.c | 37 ++++++++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 11 deletions(-)
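
For readers less familiar with the API the commit message refers to, here is a
simplified paraphrase (not the exact upstream definitions) of how
<linux/cpumask.h> picks the allocation strategy based on CONFIG_CPUMASK_OFFSTACK:

/* Simplified paraphrase of <linux/cpumask.h>; not the exact upstream code. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Large NR_CPUS: cpumask_var_t is a pointer and the mask is kmalloc'ed. */
typedef struct cpumask *cpumask_var_t;

bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);	/* may fail */
void free_cpumask_var(cpumask_var_t mask);
#else
/* Small NR_CPUS: the mask stays on the stack and "allocation" is a no-op. */
typedef struct cpumask cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;	/* never fails */
}

static inline void free_cpumask_var(cpumask_var_t mask) { }
#endif

This is why callers must handle alloc_cpumask_var() failure in the
CONFIG_CPUMASK_OFFSTACK=y case, which is exactly the point Eric raises below.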

Comments

Eric Dumazet March 29, 2024, 1:21 p.m. UTC | #1
On Fri, Mar 29, 2024 at 11:57 AM Dawei Li <dawei.li@shingroup.cn> wrote:
>
> For CONFIG_CPUMASK_OFFSTACK=y kernel, explicit allocation of cpumask
> variable on stack is not recommended since it can cause potential stack
> overflow.
>
> Instead, kernel code should always use *cpumask_var API(s) to allocate
> cpumask var in config-neutral way, leaving allocation strategy to
> CONFIG_CPUMASK_OFFSTACK.
>
> Use *cpumask_var API(s) to address it.
>
> Signed-off-by: Dawei Li <dawei.li@shingroup.cn>
> ---
>  net/iucv/iucv.c | 37 ++++++++++++++++++++++++++-----------
>  1 file changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
> index a4ab615ca3e3..b51f46ec32f9 100644
> --- a/net/iucv/iucv.c
> +++ b/net/iucv/iucv.c
> @@ -520,14 +520,19 @@ static void iucv_setmask_mp(void)
>   */
>  static void iucv_setmask_up(void)
>  {
> -       cpumask_t cpumask;
> +       cpumask_var_t cpumask;
>         int cpu;
>
> +       if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> +               return;

This can not be right. iucv_setmask_up() is not supposed to fail.

Since iucv_setmask_up() is only called with iucv_register_mutex held,
you could simply add a 'static' for @cpumask variable.



> +
>         /* Disable all cpu but the first in cpu_irq_cpumask. */
> -       cpumask_copy(&cpumask, &iucv_irq_cpumask);
> -       cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
> -       for_each_cpu(cpu, &cpumask)
> +       cpumask_copy(cpumask, &iucv_irq_cpumask);
> +       cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), cpumask);
> +       for_each_cpu(cpu, cpumask)
>                 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
> +
> +       free_cpumask_var(cpumask);
>  }
Dawei Li March 30, 2024, 5:07 a.m. UTC | #2
Hi Eric,

On Fri, Mar 29, 2024 at 02:21:28PM +0100, Eric Dumazet wrote:
> On Fri, Mar 29, 2024 at 11:57 AM Dawei Li <dawei.li@shingroup.cn> wrote:
> >
> > For CONFIG_CPUMASK_OFFSTACK=y kernel, explicit allocation of cpumask
> > variable on stack is not recommended since it can cause potential stack
> > overflow.
> >
> > Instead, kernel code should always use *cpumask_var API(s) to allocate
> > cpumask var in config-neutral way, leaving allocation strategy to
> > CONFIG_CPUMASK_OFFSTACK.
> >
> > Use *cpumask_var API(s) to address it.
> >
> > Signed-off-by: Dawei Li <dawei.li@shingroup.cn>
> > ---
> >  net/iucv/iucv.c | 37 ++++++++++++++++++++++++++-----------
> >  1 file changed, 26 insertions(+), 11 deletions(-)
> >
> > diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
> > index a4ab615ca3e3..b51f46ec32f9 100644
> > --- a/net/iucv/iucv.c
> > +++ b/net/iucv/iucv.c
> > @@ -520,14 +520,19 @@ static void iucv_setmask_mp(void)
> >   */
> >  static void iucv_setmask_up(void)
> >  {
> > -       cpumask_t cpumask;
> > +       cpumask_var_t cpumask;
> >         int cpu;
> >
> > +       if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
> > +               return;
> 
> This can not be right. iucv_setmask_up() is not supposed to fail.
> 
> Since iucv_setmask_up() is only called with iucv_register_mutex held,
> you could simply add a 'static' for @cpumask variable.

Correct, iucv_register_mutex is a global lock and can serialize access
to a static cpumask variable.

I will respin V2 as you suggested.

Thanks,

    Dawei
> 
> 
> 
> > +
> >         /* Disable all cpu but the first in cpu_irq_cpumask. */
> > -       cpumask_copy(&cpumask, &iucv_irq_cpumask);
> > -       cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
> > -       for_each_cpu(cpu, &cpumask)
> > +       cpumask_copy(cpumask, &iucv_irq_cpumask);
> > +       cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), cpumask);
> > +       for_each_cpu(cpu, cpumask)
> >                 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
> > +
> > +       free_cpumask_var(cpumask);
> >  }
>
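
To illustrate the alternative Eric suggests (and the direction the author
agrees to take for v2), here is a minimal sketch of iucv_setmask_up() with a
static mask -- only an illustration of the suggestion, not the actual v2
patch, and it assumes the function is indeed always called with
iucv_register_mutex held:

static void iucv_setmask_up(void)
{
	/* Serialized by iucv_register_mutex, so a single static mask is safe. */
	static struct cpumask cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

No allocation means no failure path, so the function keeps its void signature.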

Patch

diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a4ab615ca3e3..b51f46ec32f9 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -520,14 +520,19 @@  static void iucv_setmask_mp(void)
  */
 static void iucv_setmask_up(void)
 {
-	cpumask_t cpumask;
+	cpumask_var_t cpumask;
 	int cpu;
 
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		return;
+
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
-	cpumask_copy(&cpumask, &iucv_irq_cpumask);
-	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
-	for_each_cpu(cpu, &cpumask)
+	cpumask_copy(cpumask, &iucv_irq_cpumask);
+	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), cpumask);
+	for_each_cpu(cpu, cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
+
+	free_cpumask_var(cpumask);
 }
 
 /*
@@ -628,23 +633,33 @@  static int iucv_cpu_online(unsigned int cpu)
 
 static int iucv_cpu_down_prep(unsigned int cpu)
 {
-	cpumask_t cpumask;
+	cpumask_var_t cpumask;
+	int ret = 0;
 
 	if (!iucv_path_table)
 		return 0;
 
-	cpumask_copy(&cpumask, &iucv_buffer_cpumask);
-	cpumask_clear_cpu(cpu, &cpumask);
-	if (cpumask_empty(&cpumask))
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(cpumask, &iucv_buffer_cpumask);
+	cpumask_clear_cpu(cpu, cpumask);
+	if (cpumask_empty(cpumask)) {
 		/* Can't offline last IUCV enabled cpu. */
-		return -EINVAL;
+		ret = -EINVAL;
+		goto __free_cpumask;
+	}
 
 	iucv_retrieve_cpu(NULL);
 	if (!cpumask_empty(&iucv_irq_cpumask))
-		return 0;
+		goto __free_cpumask;
+
 	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
 				 iucv_allow_cpu, NULL, 1);
-	return 0;
+
+__free_cpumask:
+	free_cpumask_var(cpumask);
+	return ret;
 }
 
 /**
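
Unlike iucv_setmask_up(), iucv_cpu_down_prep() already had a failure path
before this patch (the -EINVAL return when the last IUCV-enabled cpu would go
offline), because callbacks registered with the cpuhp state machine may return
a negative errno to veto the CPU transition, so returning -ENOMEM on
allocation failure is at least structurally valid here. A minimal sketch of
such a registration -- the state constant, name string and callbacks below are
illustrative placeholders, not the ones iucv.c actually uses:

#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	return 0;			/* 0: the cpu may come online */
}

static int example_cpu_down_prep(unsigned int cpu)
{
	if (example_cpu_is_busy(cpu))	/* hypothetical predicate */
		return -EBUSY;		/* negative errno aborts the offline */
	return 0;
}

static int __init example_init(void)
{
	/* Dynamic state; the teardown callback runs on CPU offline. */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				 example_cpu_online, example_cpu_down_prep);
}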