Message ID | 81f7db31-a258-4dc8-b6e1-c1ef1844a9d2@web.de (mailing list archive) |
---|---|
State | Rejected |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | net/iucv: Adjustments for iucv_enable() | expand |
@@ -555,13 +555,16 @@ static int iucv_enable(void) > if (cpumask_empty(&iucv_buffer_cpumask)) > /* No cpu could declare an iucv buffer. */ > goto out; >+ >+ rc = 0; >+unlock: > cpus_read_unlock(); >- return 0; >+ return rc; >+ > out: > kfree(iucv_path_table); > iucv_path_table = NULL; >- cpus_read_unlock(); >- return rc; >+ goto unlock; [Suman] This looks confusing. What is the issue with retaining the original change? > } > > /* >-- >2.43.0 >
> @@ -555,13 +555,16 @@ static int iucv_enable(void) >> if (cpumask_empty(&iucv_buffer_cpumask)) >> /* No cpu could declare an iucv buffer. */ >> goto out; >> + >> + rc = 0; >> +unlock: >> cpus_read_unlock(); >> - return 0; >> + return rc; >> + >> out: >> kfree(iucv_path_table); >> iucv_path_table = NULL; >> - cpus_read_unlock(); >> - return rc; >> + goto unlock; > [Suman] This looks confusing. What is the issue with retaining the original change? I propose to reduce the number of cpus_read_unlock() calls (in the source code). Regards, Markus
>>> if (cpumask_empty(&iucv_buffer_cpumask)) >>> /* No cpu could declare an iucv buffer. */ >>> goto out; >>> + >>> + rc = 0; >>> +unlock: >>> cpus_read_unlock(); >>> - return 0; >>> + return rc; >>> + >>> out: >>> kfree(iucv_path_table); >>> iucv_path_table = NULL; >>> - cpus_read_unlock(); >>> - return rc; >>> + goto unlock; >> [Suman] This looks confusing. What is the issue with retaining the >original change? > >I propose to reduce the number of cpus_read_unlock() calls (in the >source code). > >Regards, >Markus [Suman] Then I think we should do something like this. Changing the code flow back-and-forth using "goto" does not seem correct. static int iucv_enable(void) { size_t alloc_size; int cpu, rc = 0; cpus_read_lock(); alloc_size = iucv_max_pathid * sizeof(struct iucv_path); iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); if (!iucv_path_table) { rc = -ENOMEM; goto out; } /* Declare per cpu buffers. */ for_each_online_cpu(cpu) smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); if (cpumask_empty(&iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. */ rc = -EIO; out: if (rc) { kfree(iucv_path_table); //kfree is itself NULL protected. So, kzalloc failure should also be handled. iucv_path_table = NULL; } cpus_read_unlock(); return rc; }
On 02.01.24 09:27, Suman Ghosh wrote: >>> [Suman] This looks confusing. What is the issue with retaining the >> original change? >> >> I propose to reduce the number of cpus_read_unlock() calls (in the >> source code). >> >> Regards, >> Markus > [Suman] Then I think we should do something like this. Changing the code flow back-and-forth using "goto" does not seem correct. I share Suman's concern that a backwards-jumping goto is confusing. But I think the Coccinelle finding of freeing a null-pointer should be addressed (see patch 2/2). Thank you, Markus, for reporting it. The allocation does require holding the cpus_read_lock. For some reason Markus wants to reduce the number of cpus_read_unlock() calls (why?), so what about something like this for both issues: diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 0ed6e34d6edd..1030403b826b 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -542,24 +542,22 @@ static int iucv_enable(void) size_t alloc_size; int cpu, rc; - cpus_read_lock(); - rc = -ENOMEM; alloc_size = iucv_max_pathid * sizeof(struct iucv_path); iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); if (!iucv_path_table) - goto out; + return -ENOMEM; /* Declare per cpu buffers. */ - rc = -EIO; + cpus_read_lock(); for_each_online_cpu(cpu) smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); - if (cpumask_empty(&iucv_buffer_cpumask)) + if (cpumask_empty(&iucv_buffer_cpumask)) { /* No cpu could declare an iucv buffer. */ - goto out; - cpus_read_unlock(); - return 0; -out: - kfree(iucv_path_table); - iucv_path_table = NULL; + kfree(iucv_path_table); + iucv_path_table = NULL; + rc = -EIO; + } else { + rc = 0; + } cpus_read_unlock(); return rc; }
> I share Suman's concern that jumping backwards goto is confusing. > But I think the Coccinelle finding of freeing a null-pointer should be addressed (see patch 2/2) > Thank you Markus for reporting it. > > The allocation does require holding the cpus_read_lock. How does this information fit with your subsequent suggestion to adjust the lock scope? > For some reason Markus wants to reduce the number of cpus_read_unlock() calls (why?), One cpus_read_unlock() call is required here. Would you like to benefit more from a smaller executable code size? > so what about something like this for both issues: > > diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c > index 0ed6e34d6edd..1030403b826b 100644 > --- a/net/iucv/iucv.c > +++ b/net/iucv/iucv.c > @@ -542,24 +542,22 @@ static int iucv_enable(void) > size_t alloc_size; > int cpu, rc; > > - cpus_read_lock(); > - rc = -ENOMEM; > alloc_size = iucv_max_pathid * sizeof(struct iucv_path); > iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); > if (!iucv_path_table) > - goto out; > + return -ENOMEM; > /* Declare per cpu buffers. */ > - rc = -EIO; > + cpus_read_lock(); > for_each_online_cpu(cpu) > smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); > - if (cpumask_empty(&iucv_buffer_cpumask)) > + if (cpumask_empty(&iucv_buffer_cpumask)) { > /* No cpu could declare an iucv buffer. */ > - goto out; > - cpus_read_unlock(); > - return 0; > -out: > - kfree(iucv_path_table); > - iucv_path_table = NULL; > + kfree(iucv_path_table); > + iucv_path_table = NULL; > + rc = -EIO; > + } else { > + rc = 0; > + } > cpus_read_unlock(); > return rc; > } I suggest reconsidering patch squashing a bit more. Regards, Markus
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 0ed6e34d6edd..71ba309e05ee 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -555,13 +555,16 @@ static int iucv_enable(void) if (cpumask_empty(&iucv_buffer_cpumask)) /* No cpu could declare an iucv buffer. */ goto out; + + rc = 0; +unlock: cpus_read_unlock(); - return 0; + return rc; + out: kfree(iucv_path_table); iucv_path_table = NULL; - cpus_read_unlock(); - return rc; + goto unlock; } /*