diff mbox series

[v5,6/8] iio: buffer-dma: Enable support for DMABUFs

Message ID 20231219175009.65482-7-paul@crapouillou.net (mailing list archive)
State Changes Requested
Headers show
Series iio: new DMABUF based API, v5 | expand

Commit Message

Paul Cercueil Dec. 19, 2023, 5:50 p.m. UTC
Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
DMA buffer implementations.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>

---
v3: Update code to provide the functions that will be used as callbacks
    for the new IOCTLs.
---
 drivers/iio/buffer/industrialio-buffer-dma.c | 157 +++++++++++++++++--
 include/linux/iio/buffer-dma.h               |  26 +++
 2 files changed, 170 insertions(+), 13 deletions(-)

Comments

Jonathan Cameron Dec. 21, 2023, 4:04 p.m. UTC | #1
On Tue, 19 Dec 2023 18:50:07 +0100
Paul Cercueil <paul@crapouillou.net> wrote:

> Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> DMA buffer implementations.
> 
> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> 
Hi Paul,

A few comments in here. Mostly places where the cleanup.h guard() stuff
can simplify error paths.

Overall this looks reasonable to me.

Jonathan

> ---
> v3: Update code to provide the functions that will be used as callbacks
>     for the new IOCTLs.
> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 157 +++++++++++++++++--
>  include/linux/iio/buffer-dma.h               |  26 +++
>  2 files changed, 170 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index 5610ba67925e..adb64f975064 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -14,6 +14,7 @@
>  #include <linux/poll.h>
>  #include <linux/iio/buffer_impl.h>
>  #include <linux/iio/buffer-dma.h>
> +#include <linux/dma-buf.h>
>  #include <linux/dma-mapping.h>
>  #include <linux/sizes.h>
>  
> @@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
>  {
>  	struct iio_dma_buffer_block *block = container_of(kref,
>  		struct iio_dma_buffer_block, kref);
> +	struct iio_dma_buffer_queue *queue = block->queue;
>  
> -	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
> +	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
>  
> -	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
> -					block->vaddr, block->phys_addr);
> +	mutex_lock(&queue->lock);
>  
> -	iio_buffer_put(&block->queue->buffer);
> +	if (block->fileio) {
> +		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
> +				  block->vaddr, block->phys_addr);
> +		queue->num_fileio_blocks--;
> +	}
> +
> +	queue->num_blocks--;
>  	kfree(block);
> +
> +	mutex_unlock(&queue->lock);
> +
> +	iio_buffer_put(&queue->buffer);
>  }
>  
>  static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
> @@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
>  }
>  
>  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> -	struct iio_dma_buffer_queue *queue, size_t size)
> +	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
>  {
>  	struct iio_dma_buffer_block *block;
>  
> @@ -171,13 +182,16 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>  	if (!block)
>  		return NULL;
>  
> -	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> -		&block->phys_addr, GFP_KERNEL);
> -	if (!block->vaddr) {
> -		kfree(block);
> -		return NULL;
> +	if (fileio) {
> +		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> +						  &block->phys_addr, GFP_KERNEL);
> +		if (!block->vaddr) {
> +			kfree(block);
> +			return NULL;
> +		}
>  	}
>  
> +	block->fileio = fileio;
>  	block->size = size;
>  	block->state = IIO_BLOCK_STATE_DONE;
>  	block->queue = queue;
> @@ -186,6 +200,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
>  
>  	iio_buffer_get(&queue->buffer);
>  
> +	queue->num_blocks++;
> +	queue->num_fileio_blocks += fileio;
Adding a boolean is non intuitive.

	if (fileio)
		queue->num_fileio_blocks++;

probably easier to read, and the compiler should be able to figure out how to
optimise the condition away.

> +
>  	return block;
>  }
>  
> @@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  	_iio_dma_buffer_block_done(block);
>  	spin_unlock_irqrestore(&queue->list_lock, flags);
>  
> +	if (!block->fileio)
> +		iio_buffer_signal_dmabuf_done(block->attach, 0);
> +
>  	iio_buffer_block_put_atomic(block);
>  	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
>  }
> @@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
>  		list_del(&block->head);
>  		block->bytes_used = 0;
>  		_iio_dma_buffer_block_done(block);
> +
> +		if (!block->fileio)
> +			iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
>  		iio_buffer_block_put_atomic(block);
>  	}
>  	spin_unlock_irqrestore(&queue->list_lock, flags);
>  
> +	queue->fileio.enabled = false;

While this obviously doesn't need to be conditional if it can already be false
it might be easier to follow the code flow if it didn't check whether we were
doing fileio before disabling it.

>  	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
>  }
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
> @@ -261,6 +285,12 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
>  	}
>  }
>  
> +static bool iio_dma_buffer_fileio_mode(struct iio_dma_buffer_queue *queue)
> +{
> +	return queue->fileio.enabled ||
> +		queue->num_blocks == queue->num_fileio_blocks;
This is a little odd. So it would be good to have a comment on why this condition is used.
Or rename the function to imply it's checking if enabled, or can be enabled.

At first glance I expected a function with this name to just be an accessor
function. e.g.
	return queue->fileio.enabled;

> +}
> +
>  /**
>   * iio_dma_buffer_request_update() - DMA buffer request_update callback
>   * @buffer: The buffer which to request an update
> @@ -287,6 +317,12 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>  
>  	mutex_lock(&queue->lock);
>  
> +	queue->fileio.enabled = iio_dma_buffer_fileio_mode(queue);
> +
> +	/* If DMABUFs were created, disable fileio interface */
> +	if (!queue->fileio.enabled)
> +		goto out_unlock;
> +
>  	/* Allocations are page aligned */
>  	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
>  		try_reuse = true;
> @@ -317,7 +353,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>  			block = queue->fileio.blocks[i];
>  			if (block->state == IIO_BLOCK_STATE_DEAD) {
>  				/* Could not reuse it */
> -				iio_buffer_block_put(block);
> +				iio_buffer_block_put_atomic(block);
>  				block = NULL;
>  			} else {
>  				block->size = size;
> @@ -327,7 +363,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>  		}
>  
>  		if (!block) {
> -			block = iio_dma_buffer_alloc_block(queue, size);
> +			block = iio_dma_buffer_alloc_block(queue, size, true);
>  			if (!block) {
>  				ret = -ENOMEM;
>  				goto out_unlock;
> @@ -363,7 +399,7 @@ static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
>  	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
>  		if (!queue->fileio.blocks[i])
>  			continue;
> -		iio_buffer_block_put(queue->fileio.blocks[i]);
> +		iio_buffer_block_put_atomic(queue->fileio.blocks[i]);

For these cases that are atomic or not, it might be worth calling out why they need to be
atomic.

>  		queue->fileio.blocks[i] = NULL;
>  	}
>  	queue->fileio.active_block = NULL;
> @@ -384,8 +420,12 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
>  
>  	block->state = IIO_BLOCK_STATE_ACTIVE;
>  	iio_buffer_block_get(block);
> +
>  	ret = queue->ops->submit(queue, block);
>  	if (ret) {
> +		if (!block->fileio)
> +			iio_buffer_signal_dmabuf_done(block->attach, ret);
> +
>  		/*
>  		 * This is a bit of a problem and there is not much we can do
>  		 * other then wait for the buffer to be disabled and re-enabled
> @@ -588,6 +628,97 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
>  }
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
>  
> +struct iio_dma_buffer_block *
> +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
> +			     struct dma_buf_attachment *attach)
> +{
> +	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> +	struct iio_dma_buffer_block *block;
> +	int err;
> +
> +	mutex_lock(&queue->lock);

	guard(mutex)(&queue->lock);
> +
> +	/*
> +	 * If the buffer is enabled and in fileio mode new blocks can't be
> +	 * allocated.
> +	 */
> +	if (queue->fileio.enabled) {
> +		err = -EBUSY;
		return ERR_PTR(-EBUSY);
> +		goto err_unlock;
> +	}
> +
> +	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
> +	if (!block) {
> +		err = -ENOMEM;

		return 

> +		goto err_unlock;
> +	}
> +
> +	block->attach = attach;
> +
> +	/* Free memory that might be in use for fileio mode */
> +	iio_dma_buffer_fileio_free(queue);
> +
> +	mutex_unlock(&queue->lock);

Drop this as unneeded if you use guard()

> +
> +	return block;
> +
> +err_unlock:
> +	mutex_unlock(&queue->lock);
> +	return ERR_PTR(err);
> +}
> +EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);


> +static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
> +{
> +	struct iio_dma_buffer_queue *queue = block->queue;
> +
> +	/* If in fileio mode buffers can't be enqueued. */
> +	if (queue->fileio.enabled)
> +		return -EBUSY;
> +
> +	switch (block->state) {
> +	case IIO_BLOCK_STATE_QUEUED:
> +		return -EPERM;
> +	case IIO_BLOCK_STATE_DONE:
> +		return 0;
> +	default:
> +		return -EBUSY;

Is this a real condition or just avoiding a compile warning?  If it's real
I'd like the various states that lead to it to be listed here just so we
can more easily understand why -EBUSY is appropriate.

> +	}
> +}
> +
> +int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
> +				  struct iio_dma_buffer_block *block,
> +				  struct sg_table *sgt,
> +				  size_t size, bool cyclic)
> +{
> +	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> +	int ret = 0;

No need to init as it's always set.


> +
> +	mutex_lock(&queue->lock);

guard(mutex)(&queue->lock);

Then can do direct returns on error and not bother with the manual
unlock.

> +	ret = iio_dma_can_enqueue_block(block);
> +	if (ret < 0)
> +		goto out_mutex_unlock;
> +
> +	block->bytes_used = size;
> +	block->cyclic = cyclic;
> +	block->sg_table = sgt;
> +
> +	iio_dma_buffer_enqueue(queue, block);
> +
> +out_mutex_unlock:
> +	mutex_unlock(&queue->lock);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
Nuno Sá Dec. 22, 2023, 8:56 a.m. UTC | #2
On Thu, 2023-12-21 at 16:04 +0000, Jonathan Cameron wrote:
> On Tue, 19 Dec 2023 18:50:07 +0100
> Paul Cercueil <paul@crapouillou.net> wrote:
> 
> > Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> > and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> > DMA buffer implementations.
> > 
> > Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> > 
> Hi Paul,
> 
> A few comments in here. Mostly places where the cleanup.h guard() stuff
> can simplify error paths.
> 
> Overall this looks reasonable to me.
> 
> Jonathan
> 
> > ---
> > v3: Update code to provide the functions that will be used as callbacks
> >     for the new IOCTLs.
> > ---
> >  drivers/iio/buffer/industrialio-buffer-dma.c | 157 +++++++++++++++++--
> >  include/linux/iio/buffer-dma.h               |  26 +++
> >  2 files changed, 170 insertions(+), 13 deletions(-)
> > 
> > diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c
> > b/drivers/iio/buffer/industrialio-buffer-dma.c
> > index 5610ba67925e..adb64f975064 100644
> > --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> > +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> > @@ -14,6 +14,7 @@
> >  #include <linux/poll.h>
> >  #include <linux/iio/buffer_impl.h>
> >  #include <linux/iio/buffer-dma.h>
> > +#include <linux/dma-buf.h>
> >  #include <linux/dma-mapping.h>
> >  #include <linux/sizes.h>
> >  
> > @@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
> >  {
> >         struct iio_dma_buffer_block *block = container_of(kref,
> >                 struct iio_dma_buffer_block, kref);
> > +       struct iio_dma_buffer_queue *queue = block->queue;
> >  
> > -       WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
> > +       WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
> >  
> > -       dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
> > -                                       block->vaddr, block->phys_addr);
> > +       mutex_lock(&queue->lock);
> >  
> > -       iio_buffer_put(&block->queue->buffer);
> > +       if (block->fileio) {
> > +               dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
> > +                                 block->vaddr, block->phys_addr);
> > +               queue->num_fileio_blocks--;
> > +       }
> > +
> > +       queue->num_blocks--;
> >         kfree(block);
> > +
> > +       mutex_unlock(&queue->lock);
> > +
> > +       iio_buffer_put(&queue->buffer);
> >  }
> >  
> >  static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
> > @@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue
> > *iio_buffer_to_queue(struct iio_buffer *buf)
> >  }
> >  
> >  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> > -       struct iio_dma_buffer_queue *queue, size_t size)
> > +       struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
> >  {
> >         struct iio_dma_buffer_block *block;
> >  
> > @@ -171,13 +182,16 @@ static struct iio_dma_buffer_block
> > *iio_dma_buffer_alloc_block(
> >         if (!block)
> >                 return NULL;
> >  
> > -       block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > -               &block->phys_addr, GFP_KERNEL);
> > -       if (!block->vaddr) {
> > -               kfree(block);
> > -               return NULL;
> > +       if (fileio) {
> > +               block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > +                                                 &block->phys_addr, GFP_KERNEL);
> > +               if (!block->vaddr) {
> > +                       kfree(block);
> > +                       return NULL;
> > +               }
> >         }
> >  
> > +       block->fileio = fileio;
> >         block->size = size;
> >         block->state = IIO_BLOCK_STATE_DONE;
> >         block->queue = queue;
> > @@ -186,6 +200,9 @@ static struct iio_dma_buffer_block
> > *iio_dma_buffer_alloc_block(
> >  
> >         iio_buffer_get(&queue->buffer);
> >  
> > +       queue->num_blocks++;
> > +       queue->num_fileio_blocks += fileio;
> Adding a boolean is non intuitive.
> 
>         if (fileio)
>                 queue->num_fileio_blocks++;
> 
> probably easier to read, and the compiler should be able to figure out how to
> optimise the condition away.
> 
> > +
> >         return block;
> >  }
> >  
> > @@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block
> > *block)
> >         _iio_dma_buffer_block_done(block);
> >         spin_unlock_irqrestore(&queue->list_lock, flags);
> >  
> > +       if (!block->fileio)
> > +               iio_buffer_signal_dmabuf_done(block->attach, 0);
> > +
> >         iio_buffer_block_put_atomic(block);
> >         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> >  }
> > @@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct
> > iio_dma_buffer_queue *queue,
> >                 list_del(&block->head);
> >                 block->bytes_used = 0;
> >                 _iio_dma_buffer_block_done(block);
> > +
> > +               if (!block->fileio)
> > +                       iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
> >                 iio_buffer_block_put_atomic(block);
> >         }
> >         spin_unlock_irqrestore(&queue->list_lock, flags);
> >  
> > +       queue->fileio.enabled = false;
> 
> While this obviously doesn't need to be conditional if it can already be false
> it might be easier to follow the code flow if it didn't check whether we were
> doing fileio before disabling it.
> 
> >         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> >  }
> >  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
> > @@ -261,6 +285,12 @@ static bool iio_dma_block_reusable(struct
> > iio_dma_buffer_block *block)
> >         }
> >  }
> >  
> > +static bool iio_dma_buffer_fileio_mode(struct iio_dma_buffer_queue *queue)
> > +{
> > +       return queue->fileio.enabled ||
> > +               queue->num_blocks == queue->num_fileio_blocks;
> This is a little odd. So it would be good to have a comment on why this condition is used.
> Or rename the function to imply it's checking if enabled, or can be enabled.
> 
> At first glance I expected a function with this name to just be an accessor
> function. e.g.
>         return queue->fileio.enabled;
> 
> > +}
> > +
> >  /**
> >   * iio_dma_buffer_request_update() - DMA buffer request_update callback
> >   * @buffer: The buffer which to request an update
> > @@ -287,6 +317,12 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >  
> >         mutex_lock(&queue->lock);
> >  
> > +       queue->fileio.enabled = iio_dma_buffer_fileio_mode(queue);
> > +
> > +       /* If DMABUFs were created, disable fileio interface */
> > +       if (!queue->fileio.enabled)
> > +               goto out_unlock;
> > +
> >         /* Allocations are page aligned */
> >         if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
> >                 try_reuse = true;
> > @@ -317,7 +353,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >                         block = queue->fileio.blocks[i];
> >                         if (block->state == IIO_BLOCK_STATE_DEAD) {
> >                                 /* Could not reuse it */
> > -                               iio_buffer_block_put(block);
> > +                               iio_buffer_block_put_atomic(block);
> >                                 block = NULL;
> >                         } else {
> >                                 block->size = size;
> > @@ -327,7 +363,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >                 }
> >  
> >                 if (!block) {
> > -                       block = iio_dma_buffer_alloc_block(queue, size);
> > +                       block = iio_dma_buffer_alloc_block(queue, size, true);
> >                         if (!block) {
> >                                 ret = -ENOMEM;
> >                                 goto out_unlock;
> > @@ -363,7 +399,7 @@ static void iio_dma_buffer_fileio_free(struct
> > iio_dma_buffer_queue *queue)
> >         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> >                 if (!queue->fileio.blocks[i])
> >                         continue;
> > -               iio_buffer_block_put(queue->fileio.blocks[i]);
> > +               iio_buffer_block_put_atomic(queue->fileio.blocks[i]);
> 
> For these cases that are atomic or not, it might be worth calling out why they need
> to be
> atomic.
> 
> >                 queue->fileio.blocks[i] = NULL;
> >         }
> >         queue->fileio.active_block = NULL;
> > @@ -384,8 +420,12 @@ static void iio_dma_buffer_submit_block(struct
> > iio_dma_buffer_queue *queue,
> >  
> >         block->state = IIO_BLOCK_STATE_ACTIVE;
> >         iio_buffer_block_get(block);
> > +
> >         ret = queue->ops->submit(queue, block);
> >         if (ret) {
> > +               if (!block->fileio)
> > +                       iio_buffer_signal_dmabuf_done(block->attach, ret);
> > +
> >                 /*
> >                  * This is a bit of a problem and there is not much we can do
> >                  * other then wait for the buffer to be disabled and re-enabled
> > @@ -588,6 +628,97 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
> >  }
> >  EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
> >  
> > +struct iio_dma_buffer_block *
> > +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
> > +                            struct dma_buf_attachment *attach)
> > +{
> > +       struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> > +       struct iio_dma_buffer_block *block;
> > +       int err;
> > +
> > +       mutex_lock(&queue->lock);
> 
>         guard(mutex)(&queue->lock);

I'm also a big fan of this new stuff but shouldn't we have a prep patch making the
transition to that? Otherwise we'll end up with a mix of styles. I'm happy to clean
up stuff with follow up patches (even some coding style could be improved IIRC) but
typically that's not how we handle things like this I believe...

- Nuno Sá
>
Jonathan Cameron Dec. 26, 2023, 3:30 p.m. UTC | #3
> > > +struct iio_dma_buffer_block *
> > > +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
> > > +                            struct dma_buf_attachment *attach)
> > > +{
> > > +       struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> > > +       struct iio_dma_buffer_block *block;
> > > +       int err;
> > > +
> > > +       mutex_lock(&queue->lock);  
> > 
> >         guard(mutex)(&queue->lock);  
> 
> I'm also a big fan of this new stuff but shouldn't we have a prep patch making the
> transition to that? Otherwise we'll end up with a mix of styles. I'm happy to clean
> up stuff with follow up patches (even some coding style could be improved IIRC) but
> typically that's not how we handle things like this I believe...

Ideally yes, I think a prep patch would make sense - but I'm not that fussed
about it and follow up patches would be fine here. 

There are cases for using this that are much easier to justify than
others, so I don't really mind if simple

mutex_lock();
do_something
mutex_unlock();

cases are left forever not using guard(), though I also don't mind if people want to use
scoped_guard() or guard for these just to be consistent.  Either way is pretty
easy to read.  We'll probably also continue to gain new uses of this logic such
as the conditional guard stuff that is queued up for the next merge window and
whilst that is going on we are going to have a bit of mixed style.

Jonathan


> 
> - Nuno Sá
> >   
>
diff mbox series

Patch

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 5610ba67925e..adb64f975064 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@ 
 #include <linux/poll.h>
 #include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
 
@@ -94,14 +95,24 @@  static void iio_buffer_block_release(struct kref *kref)
 {
 	struct iio_dma_buffer_block *block = container_of(kref,
 		struct iio_dma_buffer_block, kref);
+	struct iio_dma_buffer_queue *queue = block->queue;
 
-	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
+	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
 
-	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
-					block->vaddr, block->phys_addr);
+	mutex_lock(&queue->lock);
 
-	iio_buffer_put(&block->queue->buffer);
+	if (block->fileio) {
+		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
+				  block->vaddr, block->phys_addr);
+		queue->num_fileio_blocks--;
+	}
+
+	queue->num_blocks--;
 	kfree(block);
+
+	mutex_unlock(&queue->lock);
+
+	iio_buffer_put(&queue->buffer);
 }
 
 static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
@@ -163,7 +174,7 @@  static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
-	struct iio_dma_buffer_queue *queue, size_t size)
+	struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
 {
 	struct iio_dma_buffer_block *block;
 
@@ -171,13 +182,16 @@  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 	if (!block)
 		return NULL;
 
-	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
-		&block->phys_addr, GFP_KERNEL);
-	if (!block->vaddr) {
-		kfree(block);
-		return NULL;
+	if (fileio) {
+		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
+						  &block->phys_addr, GFP_KERNEL);
+		if (!block->vaddr) {
+			kfree(block);
+			return NULL;
+		}
 	}
 
+	block->fileio = fileio;
 	block->size = size;
 	block->state = IIO_BLOCK_STATE_DONE;
 	block->queue = queue;
@@ -186,6 +200,9 @@  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 	iio_buffer_get(&queue->buffer);
 
+	queue->num_blocks++;
+	queue->num_fileio_blocks += fileio;
+
 	return block;
 }
 
@@ -211,6 +228,9 @@  void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 	_iio_dma_buffer_block_done(block);
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
+	if (!block->fileio)
+		iio_buffer_signal_dmabuf_done(block->attach, 0);
+
 	iio_buffer_block_put_atomic(block);
 	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
@@ -237,10 +257,14 @@  void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 		list_del(&block->head);
 		block->bytes_used = 0;
 		_iio_dma_buffer_block_done(block);
+
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
 		iio_buffer_block_put_atomic(block);
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
+	queue->fileio.enabled = false;
 	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
@@ -261,6 +285,12 @@  static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
 	}
 }
 
+static bool iio_dma_buffer_fileio_mode(struct iio_dma_buffer_queue *queue)
+{
+	return queue->fileio.enabled ||
+		queue->num_blocks == queue->num_fileio_blocks;
+}
+
 /**
  * iio_dma_buffer_request_update() - DMA buffer request_update callback
  * @buffer: The buffer which to request an update
@@ -287,6 +317,12 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 
 	mutex_lock(&queue->lock);
 
+	queue->fileio.enabled = iio_dma_buffer_fileio_mode(queue);
+
+	/* If DMABUFs were created, disable fileio interface */
+	if (!queue->fileio.enabled)
+		goto out_unlock;
+
 	/* Allocations are page aligned */
 	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
 		try_reuse = true;
@@ -317,7 +353,7 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 			block = queue->fileio.blocks[i];
 			if (block->state == IIO_BLOCK_STATE_DEAD) {
 				/* Could not reuse it */
-				iio_buffer_block_put(block);
+				iio_buffer_block_put_atomic(block);
 				block = NULL;
 			} else {
 				block->size = size;
@@ -327,7 +363,7 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 		}
 
 		if (!block) {
-			block = iio_dma_buffer_alloc_block(queue, size);
+			block = iio_dma_buffer_alloc_block(queue, size, true);
 			if (!block) {
 				ret = -ENOMEM;
 				goto out_unlock;
@@ -363,7 +399,7 @@  static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 		if (!queue->fileio.blocks[i])
 			continue;
-		iio_buffer_block_put(queue->fileio.blocks[i]);
+		iio_buffer_block_put_atomic(queue->fileio.blocks[i]);
 		queue->fileio.blocks[i] = NULL;
 	}
 	queue->fileio.active_block = NULL;
@@ -384,8 +420,12 @@  static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 
 	block->state = IIO_BLOCK_STATE_ACTIVE;
 	iio_buffer_block_get(block);
+
 	ret = queue->ops->submit(queue, block);
 	if (ret) {
+		if (!block->fileio)
+			iio_buffer_signal_dmabuf_done(block->attach, ret);
+
 		/*
 		 * This is a bit of a problem and there is not much we can do
 		 * other then wait for the buffer to be disabled and re-enabled
@@ -588,6 +628,97 @@  size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
 
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+			     struct dma_buf_attachment *attach)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	struct iio_dma_buffer_block *block;
+	int err;
+
+	mutex_lock(&queue->lock);
+
+	/*
+	 * If the buffer is enabled and in fileio mode new blocks can't be
+	 * allocated.
+	 */
+	if (queue->fileio.enabled) {
+		err = -EBUSY;
+		goto err_unlock;
+	}
+
+	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
+	if (!block) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+	block->attach = attach;
+
+	/* Free memory that might be in use for fileio mode */
+	iio_dma_buffer_fileio_free(queue);
+
+	mutex_unlock(&queue->lock);
+
+	return block;
+
+err_unlock:
+	mutex_unlock(&queue->lock);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_attach_dmabuf);
+
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block)
+{
+	block->state = IIO_BLOCK_STATE_DEAD;
+	iio_buffer_block_put_atomic(block);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_detach_dmabuf);
+
+static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
+{
+	struct iio_dma_buffer_queue *queue = block->queue;
+
+	/* If in fileio mode buffers can't be enqueued. */
+	if (queue->fileio.enabled)
+		return -EBUSY;
+
+	switch (block->state) {
+	case IIO_BLOCK_STATE_QUEUED:
+		return -EPERM;
+	case IIO_BLOCK_STATE_DONE:
+		return 0;
+	default:
+		return -EBUSY;
+	}
+}
+
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block,
+				  struct sg_table *sgt,
+				  size_t size, bool cyclic)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
+	int ret = 0;
+
+	mutex_lock(&queue->lock);
+	ret = iio_dma_can_enqueue_block(block);
+	if (ret < 0)
+		goto out_mutex_unlock;
+
+	block->bytes_used = size;
+	block->cyclic = cyclic;
+	block->sg_table = sgt;
+
+	iio_dma_buffer_enqueue(queue, block);
+
+out_mutex_unlock:
+	mutex_unlock(&queue->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
+
 /**
  * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
  * @buffer: Buffer to set the bytes-per-datum for
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 18d3702fa95d..7be12a6bff5b 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -16,6 +16,8 @@ 
 struct iio_dma_buffer_queue;
 struct iio_dma_buffer_ops;
 struct device;
+struct dma_buf_attachment;
+struct sg_table;
 
 /**
  * enum iio_block_state - State of a struct iio_dma_buffer_block
@@ -41,6 +43,8 @@  enum iio_block_state {
  * @queue: Parent DMA buffer queue
  * @kref: kref used to manage the lifetime of block
  * @state: Current state of the block
+ * @cyclic: True if this is a cyclic buffer
+ * @fileio: True if this buffer is used for fileio mode
  */
 struct iio_dma_buffer_block {
 	/* May only be accessed by the owner of the block */
@@ -63,6 +67,12 @@  struct iio_dma_buffer_block {
 	 * queue->list_lock if the block is not owned by the core.
 	 */
 	enum iio_block_state state;
+
+	bool cyclic;
+	bool fileio;
+
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg_table;
 };
 
 /**
@@ -72,6 +82,7 @@  struct iio_dma_buffer_block {
  * @pos: Read offset in the active block
  * @block_size: Size of each block
  * @next_dequeue: index of next block that will be dequeued
+ * @enabled: Whether the buffer is operating in fileio mode
  */
 struct iio_dma_buffer_queue_fileio {
 	struct iio_dma_buffer_block *blocks[2];
@@ -80,6 +91,7 @@  struct iio_dma_buffer_queue_fileio {
 	size_t block_size;
 
 	unsigned int next_dequeue;
+	bool enabled;
 };
 
 /**
@@ -95,6 +107,8 @@  struct iio_dma_buffer_queue_fileio {
  *   the DMA controller
  * @incoming: List of buffers on the incoming queue
  * @active: Whether the buffer is currently active
+ * @num_blocks: Total number of DMA blocks
+ * @num_fileio_blocks: Number of DMA blocks for fileio mode
  * @fileio: FileIO state
  */
 struct iio_dma_buffer_queue {
@@ -107,6 +121,8 @@  struct iio_dma_buffer_queue {
 	struct list_head incoming;
 
 	bool active;
+	unsigned int num_blocks;
+	unsigned int num_fileio_blocks;
 
 	struct iio_dma_buffer_queue_fileio fileio;
 };
@@ -142,4 +158,14 @@  int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
 
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+			     struct dma_buf_attachment *attach);
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block);
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+				  struct iio_dma_buffer_block *block,
+				  struct sg_table *sgt,
+				  size_t size, bool cyclic);
+
 #endif