[v3,4/4] iio: inkern: move to the cleanup.h magic

Message ID 20240229-iio-use-cleanup-magic-v3-4-c3d34889ae3c@analog.com (mailing list archive)
State Accepted
Series iio: move IIO to the cleanup.h magic

Commit Message

Nuno Sa Feb. 29, 2024, 3:10 p.m. UTC
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.

While at it, also use __free(kfree) where allocations are done and drop
obvious comment in iio_channel_read_min().

Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
 drivers/iio/inkern.c | 255 +++++++++++++++++----------------------------------
 1 file changed, 85 insertions(+), 170 deletions(-)
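
For readers new to cleanup.h, the pattern the series adopts looks roughly like
this - a minimal sketch with hypothetical names, not code from this patch:

	#include <linux/cleanup.h>
	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct demo_state { const void *info; int raw; };	/* hypothetical */
	static DEFINE_MUTEX(demo_lock);				/* hypothetical */

	static int demo_read(struct demo_state *st, int *val)
	{
		/* guard() releases the mutex automatically on every return */
		guard(mutex)(&demo_lock);
		if (!st->info)
			return -ENODEV;
		*val = st->raw;
		return 0;
	}

	static struct demo_state *demo_alloc(void)
	{
		/* __free(kfree) frees the buffer on any exit unless
		 * ownership is handed to the caller via return_ptr() */
		struct demo_state *st __free(kfree) =
					kzalloc(sizeof(*st), GFP_KERNEL);

		if (!st)
			return ERR_PTR(-ENOMEM);
		return_ptr(st);
	}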

Comments

Jonathan Cameron March 3, 2024, 2:24 p.m. UTC | #1
On Thu, 29 Feb 2024 16:10:28 +0100
Nuno Sa <nuno.sa@analog.com> wrote:

> Use the new cleanup magic for handling mutexes in IIO. This allows us to
> greatly simplify some code paths.
> 
> While at it, also use __free(kfree) where allocations are done and drop
> obvious comment in iio_channel_read_min().
> 
> Signed-off-by: Nuno Sa <nuno.sa@analog.com>

Hi Nuno

Series looks very nice. One trivial thing inline - I can tidy that up whilst
applying if nothing else comes up.

This obviously touches a lot of core code, so even though it's simple it's
high risk to queue up late. I also have a complex mess already queued up
for the coming merge window. Hence I'm going to hold off on applying this
series until the start of the next cycle.

Nothing outside IIO is going to depend on it, so it's a rather simpler decision
to hold it than for the ones that add new general purpose infrastructure.

Jonathan




>  EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
>  
> @@ -757,29 +711,24 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
>  	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
>  	int ret;
>  
> -	mutex_lock(&iio_dev_opaque->info_exist_lock);
> -	if (!chan->indio_dev->info) {
> -		ret = -ENODEV;
> -		goto err_unlock;
> -	}
> +	guard(mutex)(&iio_dev_opaque->info_exist_lock);
> +	if (!chan->indio_dev->info)
> +		return -ENODEV;
>  
>  	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
>  		ret = iio_channel_read(chan, val, NULL,
>  				       IIO_CHAN_INFO_PROCESSED);
>  		if (ret < 0)
> -			goto err_unlock;
> +			return ret;
>  		*val *= scale;

		return 0;

>  	} else {
could drop the else.

>  		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
>  		if (ret < 0)
> -			goto err_unlock;
> +			return ret;
>  		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
>  							    scale);
		return iio_convert_raw_to_proc...

>  	}
>  
> -err_unlock:
> -	mutex_unlock(&iio_dev_opaque->info_exist_lock);
> -
>  	return ret;
Drop this.
>  }
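
Putting those suggestions together, the tail of the function would read roughly
as below - a sketch of the suggested shape, not necessarily the code as applied:

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info)
		return -ENODEV;

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			return ret;
		*val *= scale;
		return 0;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
	if (ret < 0)
		return ret;
	return iio_convert_raw_to_processed_unlocked(chan, *val, val, scale);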
Nuno Sá March 4, 2024, 8:04 a.m. UTC | #2
On Sun, 2024-03-03 at 14:24 +0000, Jonathan Cameron wrote:
> On Thu, 29 Feb 2024 16:10:28 +0100
> Nuno Sa <nuno.sa@analog.com> wrote:
> 
> > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > greatly simplify some code paths.
> > 
> > While at it, also use __free(kfree) where allocations are done and drop
> > obvious comment in iio_channel_read_min().
> > 
> > Signed-off-by: Nuno Sa <nuno.sa@analog.com>
> 
> Hi Nuno
> 
> Series looks very nice. One trivial thing inline - I can tidy that up whilst
> applying if nothing else comes up.
> 
> This obviously touches a lot of core code, so even though it's simple it's
> high risk to queue up late. I also have a complex mess already queued up
> for the coming merge window. Hence I'm going to hold off on applying this
> series until the start of the next cycle.
> 
> Nothing outside IIO is going to depend on it, so it's a rather simpler decision
> to hold it than for the ones that add new general purpose infrastructure.
> 
> 

Seems reasonable... It may even give us some time to see how the cond_guard()
and scoped_cond_guard() will end up.
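
For reference, the conditional variants take an explicit failure action; a
sketch assuming the mutex_intr guard, with st and do_locked_work()
hypothetical:

	scoped_cond_guard(mutex_intr, return -EINTR, &st->lock) {
		/* body runs only if mutex_lock_interruptible() succeeded */
		do_locked_work(st);
	}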

> 
> 
> >  EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
> >  
> > @@ -757,29 +711,24 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
> >  	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
> >  	int ret;
> >  
> > -	mutex_lock(&iio_dev_opaque->info_exist_lock);
> > -	if (!chan->indio_dev->info) {
> > -		ret = -ENODEV;
> > -		goto err_unlock;
> > -	}
> > +	guard(mutex)(&iio_dev_opaque->info_exist_lock);
> > +	if (!chan->indio_dev->info)
> > +		return -ENODEV;
> >  
> >  	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
> >  		ret = iio_channel_read(chan, val, NULL,
> >  				       IIO_CHAN_INFO_PROCESSED);
> >  		if (ret < 0)
> > -			goto err_unlock;
> > +			return ret;
> >  		*val *= scale;
> 
> 		return 0;
> 
> >  	} else {
> could drop the else.
> 
> >  		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
> >  		if (ret < 0)
> > -			goto err_unlock;
> > +			return ret;
> >  		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
> >  							    scale);
> 		return iio_convert_raw_to_proc...
> 

Hmm, unless I completely misunderstood your comments on v2, this was exactly
what I had, but you recommended leaving the else branch :).

- Nuno Sá
Jonathan Cameron March 9, 2024, 5:41 p.m. UTC | #3
On Mon, 04 Mar 2024 09:04:49 +0100
Nuno Sá <noname.nuno@gmail.com> wrote:

> On Sun, 2024-03-03 at 14:24 +0000, Jonathan Cameron wrote:
> > On Thu, 29 Feb 2024 16:10:28 +0100
> > Nuno Sa <nuno.sa@analog.com> wrote:
> >   
> > > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > > greatly simplify some code paths.
> > > 
> > > While at it, also use __free(kfree) where allocations are done and drop
> > > obvious comment in iio_channel_read_min().
> > > 
> > > Signed-off-by: Nuno Sa <nuno.sa@analog.com>  
> > 
> > Hi Nuno
> > 
> > Series looks very nice. One trivial thing inline - I can tidy that up whilst
> > applying if nothing else comes up.
> > 
> > This obviously touches a lot of core code, so even though it's simple it's
> > high risk to queue up late. I also have a complex mess already queued up
> > for the coming merge window. Hence I'm going to hold off on applying this
> > series until the start of the next cycle.
> > 
> > Nothing outside IIO is going to depend on it, so it's a rather simpler decision
> > to hold it than for the ones that add new general purpose infrastructure.
> > 
> >   
> 
> Seems reasonable... It may even give us some time to see how the cond_guard()
> and scoped_cond_guard() will end up.

Absolutely - thankfully converting to the suggestions Linus made will be
straightforward, so hopefully the worst that happens is a complex merge, or some
fixing up to do afterwards.

> 
> > 
> >   
> > >  EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
> > >  
> > > @@ -757,29 +711,24 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
> > >  	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
> > >  	int ret;
> > >  
> > > -	mutex_lock(&iio_dev_opaque->info_exist_lock);
> > > -	if (!chan->indio_dev->info) {
> > > -		ret = -ENODEV;
> > > -		goto err_unlock;
> > > -	}
> > > +	guard(mutex)(&iio_dev_opaque->info_exist_lock);
> > > +	if (!chan->indio_dev->info)
> > > +		return -ENODEV;
> > >  
> > >  	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
> > >  		ret = iio_channel_read(chan, val, NULL,
> > >  				       IIO_CHAN_INFO_PROCESSED);
> > >  		if (ret < 0)
> > > -			goto err_unlock;
> > > +			return ret;
> > >  		*val *= scale;  
> > 
> > 		return 0;
> >   
> > >  	} else {  
> > could drop the else.
> >   
> > >  		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
> > >  		if (ret < 0)
> > > -			goto err_unlock;
> > > +			return ret;
> > >  		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
> > >  							    scale);
> > 		return iio_convert_raw_to_proc...
> >   
> 
> Hmm, unless I completely misunderstood your comments on v2, this was exactly
> what I had, but you recommended leaving the else branch :).
> 
That was a younger me :)  Either way is fine.

Jonathan


> - Nuno Sá
>
Jonathan Cameron March 16, 2024, 1:26 p.m. UTC | #4
On Sat, 9 Mar 2024 17:41:45 +0000
Jonathan Cameron <jic23@kernel.org> wrote:

> On Mon, 04 Mar 2024 09:04:49 +0100
> Nuno Sá <noname.nuno@gmail.com> wrote:
> 
> > On Sun, 2024-03-03 at 14:24 +0000, Jonathan Cameron wrote:  
> > > On Thu, 29 Feb 2024 16:10:28 +0100
> > > Nuno Sa <nuno.sa@analog.com> wrote:
> > >     
> > > > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > > > greatly simplify some code paths.
> > > > 
> > > > While at it, also use __free(kfree) where allocations are done and drop
> > > > obvious comment in iio_channel_read_min().
> > > > 
> > > > Signed-off-by: Nuno Sa <nuno.sa@analog.com>    
> > > 
> > > Hi Nuno
> > > 
> > > Series looks very nice. One trivial thing inline - I can tidy that up whilst
> > > applying if nothing else comes up.
> > > 
> > > This obviously touches a lot of core code, so even though it's simple it's
> > > high risk to queue up late. I also have a complex mess already queued up
> > > for the coming merge window. Hence I'm going to hold off on applying this
> > > series until the start of the next cycle.
> > > 
> > > Nothing outside IIO is going to depend on it, so it's a rather simpler decision
> > > to hold it than for the ones that add new general purpose infrastructure.
> > > 
> > >     
> > 
> > Seems reasonable... It may even give us some time to see how the cond_guard()
> > and scoped_cond_guard() will end up.  
> 
> Absolutely - thankfully converting to the suggestions Linus made will be
> straightforward, so hopefully the worst that happens is a complex merge, or some
> fixing up to do afterwards.
> 
> >   
> > > 
> > >     
> > > >  EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
> > > >  
> > > > @@ -757,29 +711,24 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
> > > >  	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
> > > >  	int ret;
> > > >  
> > > > -	mutex_lock(&iio_dev_opaque->info_exist_lock);
> > > > -	if (!chan->indio_dev->info) {
> > > > -		ret = -ENODEV;
> > > > -		goto err_unlock;
> > > > -	}
> > > > +	guard(mutex)(&iio_dev_opaque->info_exist_lock);
> > > > +	if (!chan->indio_dev->info)
> > > > +		return -ENODEV;
> > > >  
> > > >  	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
> > > >  		ret = iio_channel_read(chan, val, NULL,
> > > >  				       IIO_CHAN_INFO_PROCESSED);
> > > >  		if (ret < 0)
> > > > -			goto err_unlock;
> > > > +			return ret;
> > > >  		*val *= scale;    
> > > 
> > > 		return 0;
> > >     
> > > >  	} else {    
> > > could drop the else.
> > >     
> > > >  		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
> > > >  		if (ret < 0)
> > > > -			goto err_unlock;
> > > > +			return ret;
> > > >  		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
> > > >  							    scale);
> > > 		return iio_convert_raw_to_proc...
> > >     
> > 
> > Hmm, unless I completely misunderstood your comments on v2, this was exactly
> > what I had, but you recommended leaving the else branch :).
> >   
> That was a younger me :)  Either way is fine.

I compromised - moved the returns into the two branches, but kept the else.

Given I've started queuing stuff up for next cycle, it seemed sensible to pick these
up. Applied to the togreg-normal branch of iio.git.

That will get rebased on rc1 and become togreg as normal in a few weeks' time
and hopefully I'll retire the togreg-normal / togreg-cleanup split.

Thanks,

Jonathan

> 
> Jonathan
> 
> 
> > - Nuno Sá
> >   
> 
>
Andy Shevchenko March 16, 2024, 7:48 p.m. UTC | #5
Thu, Feb 29, 2024 at 04:10:28PM +0100, Nuno Sa wrote:
> Use the new cleanup magic for handling mutexes in IIO. This allows us to
> greatly simplify some code paths.
> 
> While at it, also use __free(kfree) where allocations are done and drop
> obvious comment in iio_channel_read_min().

...

>  int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
>  {
> -	int i = 0, ret = 0;
> +	int i = 0, ret;
>  	struct iio_map_internal *mapi;

Why not make it reversed xmas tree order at the same time?

>  	if (!maps)
>  		return 0;

...

> -error_ret:
> -	if (ret)
> -		iio_map_array_unregister_locked(indio_dev);
> -	mutex_unlock(&iio_map_list_lock);
>  
> +	return 0;
> +error_ret:
> +	iio_map_array_unregister_locked(indio_dev);
>  	return ret;
>  }

Do we really need to split this? (I'm fine with the new code, but it seems to me
like unneeded churn.)

...

> +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> +							    GFP_KERNEL);

I would indent a bit differently:

	struct iio_channel *channel __free(kfree) =
					kzalloc(sizeof(*channel), GFP_KERNEL);

(maybe fewer TABs, but you get the idea)

>  	if (!channel)
>  		return ERR_PTR(-ENOMEM);

...

> +	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
> +							  sizeof(*chans),
> +							  GFP_KERNEL);

Ditto.

>  	if (!chans)
>  		return ERR_PTR(-ENOMEM);

...

>  	/* first find matching entry the channel map */
> -	mutex_lock(&iio_map_list_lock);
> -	list_for_each_entry(c_i, &iio_map_list, l) {
> -		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> -		    (channel_name &&
> -		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
> -			continue;
> -		c = c_i;
> -		iio_device_get(c->indio_dev);
> -		break;
> +	scoped_guard(mutex, &iio_map_list_lock) {
> +		list_for_each_entry(c_i, &iio_map_list, l) {
> +			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> +			    (channel_name &&
> +			     strcmp(channel_name, c_i->map->consumer_channel) != 0))

I would kill these ' != 0' pieces, but I see they are in the original code.

> +				continue;
> +			c = c_i;
> +			iio_device_get(c->indio_dev);
> +			break;
> +		}
>  	}

...

> -	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
> +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> +							    GFP_KERNEL);

Indentation?

...

> -error_no_chan:
> -	kfree(channel);
>  error_no_mem:
>  	iio_device_put(c->indio_dev);
>  	return ERR_PTR(err);

Effectively you move the kfree after the device put - would it be a problem?

...

>  struct iio_channel *iio_channel_get_all(struct device *dev)
>  {
>  	const char *name;
> -	struct iio_channel *chans;
> +	struct iio_channel *fw_chans;
>  	struct iio_map_internal *c = NULL;

Move it here for better ordering?

>  	int nummaps = 0;
>  	int mapind = 0;

...

> -	chans = fwnode_iio_channel_get_all(dev);
> +	fw_chans = fwnode_iio_channel_get_all(dev);

I would move it before the conditional...

>  	/*
>  	 * We only want to carry on if the error is -ENODEV.  Anything else
>  	 * should be reported up the stack.
>  	 */
> -	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
> -		return chans;

...here.

> +	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
> +		return fw_chans;

...

> +	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
> +							  sizeof(*chans),
> +							  GFP_KERNEL);

Indentation?

> +	if (!chans)
> +		return ERR_PTR(-ENOMEM);
Nuno Sá March 18, 2024, 9:20 a.m. UTC | #6
On Sat, 2024-03-16 at 21:48 +0200, Andy Shevchenko wrote:
> Thu, Feb 29, 2024 at 04:10:28PM +0100, Nuno Sa wrote:
> > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > greatly simplify some code paths.
> > 
> > While at it, also use __free(kfree) where allocations are done and drop
> > obvious comment in iio_channel_read_min().
> 
> ...
> 
> >  int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
> >  {
> > -	int i = 0, ret = 0;
> > +	int i = 0, ret;
> >  	struct iio_map_internal *mapi;
> 
> Why not make it reversed xmas tree order at the same time?
> 
> >  	if (!maps)
> >  		return 0;
> 
> ...
> 
> > -error_ret:
> > -	if (ret)
> > -		iio_map_array_unregister_locked(indio_dev);
> > -	mutex_unlock(&iio_map_list_lock);
> >  
> > +	return 0;
> > +error_ret:
> > +	iio_map_array_unregister_locked(indio_dev);
> >  	return ret;
> >  }
> 
> Do we really need to split this? (I'm fine with the new code, but it seems to
> me like unneeded churn.)
> 
> ...
> 
> > +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> > +							    GFP_KERNEL);
> 
> I would indent a bit differently:
> 
> 	struct iio_channel *channel __free(kfree) =
> 					kzalloc(sizeof(*channel), GFP_KERNEL);
> 
> (maybe fewer TABs, but you get the idea)
> 
> >  	if (!channel)
> >  		return ERR_PTR(-ENOMEM);
> 
> ...
> 
> > +	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
> > +							  sizeof(*chans),
> > +							  GFP_KERNEL);
> 
> Ditto.
> 
> >  	if (!chans)
> >  		return ERR_PTR(-ENOMEM);
> 
> ...
> 
> >  	/* first find matching entry the channel map */
> > -	mutex_lock(&iio_map_list_lock);
> > -	list_for_each_entry(c_i, &iio_map_list, l) {
> > -		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> > -		    (channel_name &&
> > -		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
> > -			continue;
> > -		c = c_i;
> > -		iio_device_get(c->indio_dev);
> > -		break;
> > +	scoped_guard(mutex, &iio_map_list_lock) {
> > +		list_for_each_entry(c_i, &iio_map_list, l) {
> > +			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> > +			    (channel_name &&
> > +			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
> 
> I would kill these ' != 0' pieces, but I see they are in the original code.
> 
> > +				continue;
> > +			c = c_i;
> > +			iio_device_get(c->indio_dev);
> > +			break;
> > +		}
> >  	}
> 
> ...
> 
> > -	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
> > +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> > +							    GFP_KERNEL);
> 
> Indentation?
> 
> ...
> 
> > -error_no_chan:
> > -	kfree(channel);
> >  error_no_mem:
> >  	iio_device_put(c->indio_dev);
> >  	return ERR_PTR(err);
> 
> Effectively you move the kfree after the device put - would it be a problem?
> 

This one got my attention... but I think we're fine. Still, it's a subtle
ordering change that I did not notice.

- Nuno Sá
Jonathan Cameron March 23, 2024, 6:09 p.m. UTC | #7
On Sat, 16 Mar 2024 21:48:18 +0200
Andy Shevchenko <andy.shevchenko@gmail.com> wrote:

> Thu, Feb 29, 2024 at 04:10:28PM +0100, Nuno Sa wrote:
> > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > greatly simplify some code paths.
> > 
> > While at it, also use __free(kfree) where allocations are done and drop
> > obvious comment in iio_channel_read_min().  
> 
> ...
> 
> >  int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
> >  {
> > -	int i = 0, ret = 0;
> > +	int i = 0, ret;
> >  	struct iio_map_internal *mapi;  
> 
> Why not make it reversed xmas tree order at the same time?
> 

I tweaked this and went a bit further, as mixing declarations that
set values and ones that don't is a bad pattern for readability.

	struct iio_map_internal *mapi;
	int i = 0;
	int ret;

> >  	if (!maps)
> >  		return 0;  
> 
> ...
> 
> > -error_ret:
> > -	if (ret)
> > -		iio_map_array_unregister_locked(indio_dev);
> > -	mutex_unlock(&iio_map_list_lock);
> >  
> > +	return 0;
> > +error_ret:
> > +	iio_map_array_unregister_locked(indio_dev);
> >  	return ret;
> >  }  
> 
> Do we really need to split this? (I'm fine with the new code, but it seems to
> me like unneeded churn.)

I much prefer not to have the
	if (ret) // error case
		do something.

	//back to both good and bad paths.
	return ret;

pattern - so I'm very keen to have this split as I disliked the original
code and there is even less reason to combine the paths now we don't need
the mutex_unlock.
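
That is, let the success path fall straight through and keep the error label
for the unwind alone, as the patch now has it:

	return 0;
error_ret:
	iio_map_array_unregister_locked(indio_dev);
	return ret;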


> 
> ...
> 
> > +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> > +							    GFP_KERNEL);  
> 
> I would indent a bit differently:
> 
> 	struct iio_channel *channel __free(kfree) =
> 					kzalloc(sizeof(*channel), GFP_KERNEL);
> 
> (maybe fewer TABs, but you get the idea)
Given I was rebasing anyway, I tidied this up (in 4 places) as well (fewer tabs ;)

> 
> >  	if (!channel)
> >  		return ERR_PTR(-ENOMEM);  
> 
> ...
> 
> > +	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
> > +							  sizeof(*chans),
> > +							  GFP_KERNEL);  
> 
> Ditto.
> 
> >  	if (!chans)
> >  		return ERR_PTR(-ENOMEM);  
> 
> ...
> 
> >  	/* first find matching entry the channel map */
> > -	mutex_lock(&iio_map_list_lock);
> > -	list_for_each_entry(c_i, &iio_map_list, l) {
> > -		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> > -		    (channel_name &&
> > -		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
> > -			continue;
> > -		c = c_i;
> > -		iio_device_get(c->indio_dev);
> > -		break;
> > +	scoped_guard(mutex, &iio_map_list_lock) {
> > +		list_for_each_entry(c_i, &iio_map_list, l) {
> > +			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
> > +			    (channel_name &&
> > +			     strcmp(channel_name, c_i->map->consumer_channel) != 0))  
> 
> I would kill these ' != 0' pieces, but I see they are in the original code.

Don't mind a change doing that, but not in this patch.

> 
> > +				continue;
> > +			c = c_i;
> > +			iio_device_get(c->indio_dev);
> > +			break;
> > +		}
> >  	}  
> 
> ...
> 
> > -	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
> > +	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
> > +							    GFP_KERNEL);  
> 
> Indentation?
> 
> ...
> 
> > -error_no_chan:
> > -	kfree(channel);
> >  error_no_mem:
> >  	iio_device_put(c->indio_dev);
> >  	return ERR_PTR(err);  
> 
> Effectively you move the kfree after the device put - would it be a problem?
It's not immediately obvious what that put pairs with, so we should probably
address that a bit more clearly anyway - but the change should be safe.

> 
> ...
> 
> >  struct iio_channel *iio_channel_get_all(struct device *dev)
> >  {
> >  	const char *name;
> > -	struct iio_channel *chans;
> > +	struct iio_channel *fw_chans;
> >  	struct iio_map_internal *c = NULL;  
> 
> Move it here for better ordering?
Trivial, but meh, I'm tweaking anyway so done.
> 
> >  	int nummaps = 0;
> >  	int mapind = 0;  
> 
> ...
> 
> > -	chans = fwnode_iio_channel_get_all(dev);
> > +	fw_chans = fwnode_iio_channel_get_all(dev);  
> 
> I would move it before the conditional...
> 
> >  	/*
> >  	 * We only want to carry on if the error is -ENODEV.  Anything else
> >  	 * should be reported up the stack.
> >  	 */
> > -	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
> > -		return chans;  
> 
> ...here.
> 
> > +	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
> > +		return fw_chans;  
> 
> ...
> 
> > +	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
> > +							  sizeof(*chans),
> > +							  GFP_KERNEL);  
> 
> Indentation?
> 
> > +	if (!chans)
> > +		return ERR_PTR(-ENOMEM);  
>
Patch

diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 7a1f6713318a..6017f294ac1c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -3,6 +3,7 @@ 
  *
  * Copyright (c) 2011 Jonathan Cameron
  */
+#include <linux/cleanup.h>
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/minmax.h>
@@ -43,13 +44,13 @@  static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
 
 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
 {
-	int i = 0, ret = 0;
+	int i = 0, ret;
 	struct iio_map_internal *mapi;
 
 	if (!maps)
 		return 0;
 
-	mutex_lock(&iio_map_list_lock);
+	guard(mutex)(&iio_map_list_lock);
 	while (maps[i].consumer_dev_name) {
 		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
 		if (!mapi) {
@@ -61,11 +62,10 @@  int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
 		list_add_tail(&mapi->l, &iio_map_list);
 		i++;
 	}
-error_ret:
-	if (ret)
-		iio_map_array_unregister_locked(indio_dev);
-	mutex_unlock(&iio_map_list_lock);
 
+	return 0;
+error_ret:
+	iio_map_array_unregister_locked(indio_dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iio_map_array_register);
@@ -75,13 +75,8 @@  EXPORT_SYMBOL_GPL(iio_map_array_register);
  */
 int iio_map_array_unregister(struct iio_dev *indio_dev)
 {
-	int ret;
-
-	mutex_lock(&iio_map_list_lock);
-	ret = iio_map_array_unregister_locked(indio_dev);
-	mutex_unlock(&iio_map_list_lock);
-
-	return ret;
+	guard(mutex)(&iio_map_list_lock);
+	return iio_map_array_unregister_locked(indio_dev);
 }
 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
 
@@ -183,25 +178,21 @@  static int __fwnode_iio_channel_get(struct iio_channel *channel,
 static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
 						  int index)
 {
-	struct iio_channel *channel;
 	int err;
 
 	if (index < 0)
 		return ERR_PTR(-EINVAL);
 
-	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
+							    GFP_KERNEL);
 	if (!channel)
 		return ERR_PTR(-ENOMEM);
 
 	err = __fwnode_iio_channel_get(channel, fwnode, index);
 	if (err)
-		goto err_free_channel;
+		return ERR_PTR(err);
 
-	return channel;
-
-err_free_channel:
-	kfree(channel);
-	return ERR_PTR(err);
+	return_ptr(channel);
 }
 
 static struct iio_channel *
@@ -291,7 +282,6 @@  EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
 static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
 {
 	struct fwnode_handle *fwnode = dev_fwnode(dev);
-	struct iio_channel *chans;
 	int i, mapind, nummaps = 0;
 	int ret;
 
@@ -307,7 +297,9 @@  static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
 		return ERR_PTR(-ENODEV);
 
 	/* NULL terminated array to save passing size */
-	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
+	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
+							  sizeof(*chans),
+							  GFP_KERNEL);
 	if (!chans)
 		return ERR_PTR(-ENOMEM);
 
@@ -317,12 +309,11 @@  static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
 		if (ret)
 			goto error_free_chans;
 	}
-	return chans;
+	return_ptr(chans);
 
 error_free_chans:
 	for (i = 0; i < mapind; i++)
 		iio_device_put(chans[i].indio_dev);
-	kfree(chans);
 	return ERR_PTR(ret);
 }
 
@@ -330,28 +321,28 @@  static struct iio_channel *iio_channel_get_sys(const char *name,
 					       const char *channel_name)
 {
 	struct iio_map_internal *c_i = NULL, *c = NULL;
-	struct iio_channel *channel;
 	int err;
 
 	if (!(name || channel_name))
 		return ERR_PTR(-ENODEV);
 
 	/* first find matching entry the channel map */
-	mutex_lock(&iio_map_list_lock);
-	list_for_each_entry(c_i, &iio_map_list, l) {
-		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
-		    (channel_name &&
-		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
-			continue;
-		c = c_i;
-		iio_device_get(c->indio_dev);
-		break;
+	scoped_guard(mutex, &iio_map_list_lock) {
+		list_for_each_entry(c_i, &iio_map_list, l) {
+			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+			    (channel_name &&
+			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
+				continue;
+			c = c_i;
+			iio_device_get(c->indio_dev);
+			break;
+		}
 	}
-	mutex_unlock(&iio_map_list_lock);
 	if (!c)
 		return ERR_PTR(-ENODEV);
 
-	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	struct iio_channel *channel __free(kfree) = kzalloc(sizeof(*channel),
+							    GFP_KERNEL);
 	if (!channel) {
 		err = -ENOMEM;
 		goto error_no_mem;
@@ -366,14 +357,12 @@  static struct iio_channel *iio_channel_get_sys(const char *name,
 
 		if (!channel->channel) {
 			err = -EINVAL;
-			goto error_no_chan;
+			goto error_no_mem;
 		}
 	}
 
-	return channel;
+	return_ptr(channel);
 
-error_no_chan:
-	kfree(channel);
 error_no_mem:
 	iio_device_put(c->indio_dev);
 	return ERR_PTR(err);
@@ -450,7 +439,7 @@  EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
 struct iio_channel *iio_channel_get_all(struct device *dev)
 {
 	const char *name;
-	struct iio_channel *chans;
+	struct iio_channel *fw_chans;
 	struct iio_map_internal *c = NULL;
 	int nummaps = 0;
 	int mapind = 0;
@@ -459,17 +448,17 @@  struct iio_channel *iio_channel_get_all(struct device *dev)
 	if (!dev)
 		return ERR_PTR(-EINVAL);
 
-	chans = fwnode_iio_channel_get_all(dev);
+	fw_chans = fwnode_iio_channel_get_all(dev);
 	/*
 	 * We only want to carry on if the error is -ENODEV.  Anything else
 	 * should be reported up the stack.
 	 */
-	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
-		return chans;
+	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
+		return fw_chans;
 
 	name = dev_name(dev);
 
-	mutex_lock(&iio_map_list_lock);
+	guard(mutex)(&iio_map_list_lock);
 	/* first count the matching maps */
 	list_for_each_entry(c, &iio_map_list, l)
 		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
@@ -477,17 +466,15 @@  struct iio_channel *iio_channel_get_all(struct device *dev)
 		else
 			nummaps++;
 
-	if (nummaps == 0) {
-		ret = -ENODEV;
-		goto error_ret;
-	}
+	if (nummaps == 0)
+		return ERR_PTR(-ENODEV);
 
 	/* NULL terminated array to save passing size */
-	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
-	if (!chans) {
-		ret = -ENOMEM;
-		goto error_ret;
-	}
+	struct iio_channel *chans __free(kfree) = kcalloc(nummaps + 1,
+							  sizeof(*chans),
+							  GFP_KERNEL);
+	if (!chans)
+		return ERR_PTR(-ENOMEM);
 
 	/* for each map fill in the chans element */
 	list_for_each_entry(c, &iio_map_list, l) {
@@ -509,17 +496,12 @@  struct iio_channel *iio_channel_get_all(struct device *dev)
 		ret = -ENODEV;
 		goto error_free_chans;
 	}
-	mutex_unlock(&iio_map_list_lock);
 
-	return chans;
+	return_ptr(chans);
 
 error_free_chans:
 	for (i = 0; i < nummaps; i++)
 		iio_device_put(chans[i].indio_dev);
-	kfree(chans);
-error_ret:
-	mutex_unlock(&iio_map_list_lock);
-
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(iio_channel_get_all);
@@ -590,38 +572,24 @@  static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
 int iio_read_channel_raw(struct iio_channel *chan, int *val)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
 
 int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
 
@@ -708,20 +676,13 @@  int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 				 int *processed, unsigned int scale)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
-						    scale);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
+						     scale);
 }
 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
 
@@ -729,19 +690,12 @@  int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
 			       enum iio_chan_info_enum attribute)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read(chan, val, val2, attribute);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read(chan, val, val2, attribute);
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
 
@@ -757,29 +711,24 @@  int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
 	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
 	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
 		ret = iio_channel_read(chan, val, NULL,
 				       IIO_CHAN_INFO_PROCESSED);
 		if (ret < 0)
-			goto err_unlock;
+			return ret;
 		*val *= scale;
 	} else {
 		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
 		if (ret < 0)
-			goto err_unlock;
+			return ret;
 		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
 							    scale);
 	}
 
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
@@ -813,19 +762,12 @@  int iio_read_avail_channel_attribute(struct iio_channel *chan,
 				     enum iio_chan_info_enum attribute)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read_avail(chan, vals, type, length, attribute);
 }
 EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
 
@@ -892,20 +834,13 @@  static int iio_channel_read_max(struct iio_channel *chan,
 int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 	int type;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
 }
 EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
 
@@ -955,40 +890,27 @@  static int iio_channel_read_min(struct iio_channel *chan,
 int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 	int type;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
 }
 EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
 
 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret = 0;
-	/* Need to verify underlying driver has not gone away */
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
 	*type = chan->channel->type;
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(iio_get_channel_type);
 
@@ -1003,19 +925,12 @@  int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
 				enum iio_chan_info_enum attribute)
 {
 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
-	int ret;
 
-	mutex_lock(&iio_dev_opaque->info_exist_lock);
-	if (!chan->indio_dev->info) {
-		ret = -ENODEV;
-		goto err_unlock;
-	}
+	guard(mutex)(&iio_dev_opaque->info_exist_lock);
+	if (!chan->indio_dev->info)
+		return -ENODEV;
 
-	ret = iio_channel_write(chan, val, val2, attribute);
-err_unlock:
-	mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
-	return ret;
+	return iio_channel_write(chan, val, val2, attribute);
 }
 EXPORT_SYMBOL_GPL(iio_write_channel_attribute);