[v2,02/12] iio: buffer-dma: Enable buffer write support

Message ID 20220207125933.81634-3-paul@crapouillou.net (mailing list archive)
State Deferred
Series iio: buffer-dma: write() and new DMABUF based API

Commit Message

Paul Cercueil Feb. 7, 2022, 12:59 p.m. UTC
Adding write support to the buffer-dma code is easy - the write()
function basically needs to do the exact same thing as the read()
function: dequeue a block, read or write the data, then enqueue the
block once it has been entirely processed.

Therefore, iio_dma_buffer_read() and the new iio_dma_buffer_write()
now both call a common iio_dma_buffer_io() function, which performs
this task.

The .space_available() callback can return the exact same value as the
.data_available() callback for input buffers, since in both cases we
count the exact same thing (the number of bytes in each available
block).

Note that we preemptively reset block->bytes_used to the block's size
in iio_dma_buffer_request_update(), as in the future the
iio_dma_buffer_enqueue() function won't reset it.
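
As an illustration (not part of this patch), a DMA-buffer based
implementation would typically wire these helpers up roughly as below,
assuming the core output-buffer callbacks (.write / .space_available)
that this patch targets exist in struct iio_buffer_access_ops; the ops
table name is made up and not taken from any in-tree driver:

#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>

/* Hypothetical ops table, for illustration only. */
static const struct iio_buffer_access_ops example_dma_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,			/* new */
	.data_available = iio_dma_buffer_data_available,
	.space_available = iio_dma_buffer_space_available,	/* new */
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
};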

v2: - Fix block->state not being reset in
      iio_dma_buffer_request_update() for output buffers.
    - Only update block->bytes_used once and add a comment about why we
      update it.
    - Add a comment about why we're setting a different state for output
      buffers in iio_dma_buffer_request_update()
    - Remove useless cast to bool (!!) in iio_dma_buffer_io()

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
---
 drivers/iio/buffer/industrialio-buffer-dma.c | 88 ++++++++++++++++----
 include/linux/iio/buffer-dma.h               |  7 ++
 2 files changed, 79 insertions(+), 16 deletions(-)

Comments

Jonathan Cameron March 28, 2022, 5:24 p.m. UTC | #1
On Mon,  7 Feb 2022 12:59:23 +0000
Paul Cercueil <paul@crapouillou.net> wrote:

> Adding write support to the buffer-dma code is easy - the write()
> function basically needs to do the exact same thing as the read()
> function: dequeue a block, read or write the data, enqueue the block
> when entirely processed.
> 
> Therefore, the iio_buffer_dma_read() and the new iio_buffer_dma_write()
> now both call a function iio_buffer_dma_io(), which will perform this
> task.
> 
> The .space_available() callback can return the exact same value as the
> .data_available() callback for input buffers, since in both cases we
> count the exact same thing (the number of bytes in each available
> block).
> 
> Note that we preemptively reset block->bytes_used to the buffer's size
> in iio_dma_buffer_request_update(), as in the future the
> iio_dma_buffer_enqueue() function won't reset it.
> 
> v2: - Fix block->state not being reset in
>       iio_dma_buffer_request_update() for output buffers.
>     - Only update block->bytes_used once and add a comment about why we
>       update it.
>     - Add a comment about why we're setting a different state for output
>       buffers in iio_dma_buffer_request_update()
>     - Remove useless cast to bool (!!) in iio_dma_buffer_io()
> 
> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
One comment inline.

I'd be tempted to queue this up with that fixed, but do we have
any users? Even though it's trivial, I'm not that keen on merging
code upstream well in advance of it being used.

Thanks,

Jonathan

> ---
>  drivers/iio/buffer/industrialio-buffer-dma.c | 88 ++++++++++++++++----
>  include/linux/iio/buffer-dma.h               |  7 ++
>  2 files changed, 79 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index 1fc91467d1aa..a9f1b673374f 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> @@ -195,6 +195,18 @@ static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  		block->state = IIO_BLOCK_STATE_DONE;
>  }
>  
> +static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
> +{
> +	__poll_t flags;
> +
> +	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
> +		flags = EPOLLIN | EPOLLRDNORM;
> +	else
> +		flags = EPOLLOUT | EPOLLWRNORM;
> +
> +	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
> +}
> +
>  /**
>   * iio_dma_buffer_block_done() - Indicate that a block has been completed
>   * @block: The completed block
> @@ -212,7 +224,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
>  	spin_unlock_irqrestore(&queue->list_lock, flags);
>  
>  	iio_buffer_block_put_atomic(block);
> -	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> +	iio_dma_buffer_queue_wake(queue);
>  }
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
>  
> @@ -241,7 +253,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
>  	}
>  	spin_unlock_irqrestore(&queue->list_lock, flags);
>  
> -	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> +	iio_dma_buffer_queue_wake(queue);
>  }
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
>  
> @@ -335,8 +347,24 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
>  			queue->fileio.blocks[i] = block;
>  		}
>  
> -		block->state = IIO_BLOCK_STATE_QUEUED;
> -		list_add_tail(&block->head, &queue->incoming);
> +		/*
> +		 * block->bytes_used may have been modified previously, e.g. by
> +		 * iio_dma_buffer_block_list_abort(). Reset it here to the
> +		 * block's size so that iio_dma_buffer_io() will work.
> +		 */
> +		block->bytes_used = block->size;
> +
> +		/*
> +		 * If it's an input buffer, mark the block as queued, and
> +		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
> +		 * done, which means it's ready to be dequeued.
> +		 */
> +		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
> +			block->state = IIO_BLOCK_STATE_QUEUED;
> +			list_add_tail(&block->head, &queue->incoming);
> +		} else {
> +			block->state = IIO_BLOCK_STATE_DONE;
> +		}
>  	}
>  
>  out_unlock:
> @@ -465,20 +493,12 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
>  	return block;
>  }
>  
> -/**
> - * iio_dma_buffer_read() - DMA buffer read callback
> - * @buffer: Buffer to read form
> - * @n: Number of bytes to read
> - * @user_buffer: Userspace buffer to copy the data to
> - *
> - * Should be used as the read callback for iio_buffer_access_ops
> - * struct for DMA buffers.
> - */
> -int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
> -	char __user *user_buffer)
> +static int iio_dma_buffer_io(struct iio_buffer *buffer,
> +			     size_t n, char __user *user_buffer, bool is_write)
>  {
>  	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
>  	struct iio_dma_buffer_block *block;
> +	void *addr;
>  	int ret;
>  
>  	if (n < buffer->bytes_per_datum)
> @@ -501,8 +521,13 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>  	n = rounddown(n, buffer->bytes_per_datum);
>  	if (n > block->bytes_used - queue->fileio.pos)
>  		n = block->bytes_used - queue->fileio.pos;
> +	addr = block->vaddr + queue->fileio.pos;
>  
> -	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
> +	if (is_write)
> +		ret = copy_from_user(addr, user_buffer, n);
> +	else
> +		ret = copy_to_user(user_buffer, addr, n);
> +	if (ret) {
>  		ret = -EFAULT;
>  		goto out_unlock;
>  	}
> @@ -521,8 +546,39 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>  
>  	return ret;
>  }
> +
> +/**
> + * iio_dma_buffer_read() - DMA buffer read callback
> + * @buffer: Buffer to read form
> + * @n: Number of bytes to read
> + * @user_buffer: Userspace buffer to copy the data to
> + *
> + * Should be used as the read callback for iio_buffer_access_ops
> + * struct for DMA buffers.
> + */
> +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
> +	char __user *user_buffer)
> +{
> +	return iio_dma_buffer_io(buffer, n, user_buffer, false);
> +}
>  EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
>  
> +/**
> + * iio_dma_buffer_write() - DMA buffer write callback
> + * @buffer: Buffer to write to
> + * @n: Number of bytes to write
> + * @user_buffer: Userspace buffer to copy the data from
> + *
> + * Should be used as the write callback for iio_buffer_access_ops
> + * struct for DMA buffers.
> + */
> +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
> +			 const char __user *user_buffer)
> +{
> +	return iio_dma_buffer_io(buffer, n, (__force char *)user_buffer, true);

Casting away the const is a little nasty. Perhaps it's worth adding a
parameter to iio_dma_buffer_io() so you can have different parameters
for the read and write cases and hence keep the const in place?
return iio_dma_buffer_io(buffer, n, NULL, user_buffer, true);
and
return iio_dma_buffer_io(buffer, n, user_buffer, NULL, false);
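
For illustration, a rough sketch of that variant (hypothetical, not the
posted code) - only the prototype and the two wrappers change, and the
copy step just picks the right pointer:

static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
			     void __user *rb, const void __user *wb,
			     bool is_write)
{
	/* ... locking / block dequeue exactly as in the posted patch ... */
	if (is_write)
		ret = copy_from_user(addr, wb, n);
	else
		ret = copy_to_user(rb, addr, n);
	/* ... */
}

int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n, user_buffer, NULL, false);
}

int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
			 const char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n, NULL, user_buffer, true);
}

That keeps both the const qualifier and the __user annotation intact
without any cast.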

> +}
> +EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
> +
>  /**
>   * iio_dma_buffer_data_available() - DMA buffer data_available callback
>   * @buf: Buffer to check for data availability
> diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
> index 18d3702fa95d..490b93f76fa8 100644
> --- a/include/linux/iio/buffer-dma.h
> +++ b/include/linux/iio/buffer-dma.h
> @@ -132,6 +132,8 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
>  	struct iio_dev *indio_dev);
>  int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>  	char __user *user_buffer);
> +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
> +			 const char __user *user_buffer);
>  size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
>  int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
>  int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
> @@ -142,4 +144,9 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
>  void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
>  void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
>  
> +static inline size_t iio_dma_buffer_space_available(struct iio_buffer *buffer)
> +{
> +	return iio_dma_buffer_data_available(buffer);
> +}
> +
>  #endif
Paul Cercueil March 28, 2022, 6:39 p.m. UTC | #2
Hi Jonathan,

On Mon, Mar 28 2022 at 18:24:09 +0100, Jonathan Cameron
<jic23@kernel.org> wrote:
> On Mon,  7 Feb 2022 12:59:23 +0000
> Paul Cercueil <paul@crapouillou.net> wrote:
> 
>>  Adding write support to the buffer-dma code is easy - the write()
>>  function basically needs to do the exact same thing as the read()
>>  function: dequeue a block, read or write the data, enqueue the block
>>  when entirely processed.
>> 
>>  Therefore, the iio_buffer_dma_read() and the new 
>> iio_buffer_dma_write()
>>  now both call a function iio_buffer_dma_io(), which will perform 
>> this
>>  task.
>> 
>>  The .space_available() callback can return the exact same value as 
>> the
>>  .data_available() callback for input buffers, since in both cases we
>>  count the exact same thing (the number of bytes in each available
>>  block).
>> 
>>  Note that we preemptively reset block->bytes_used to the buffer's 
>> size
>>  in iio_dma_buffer_request_update(), as in the future the
>>  iio_dma_buffer_enqueue() function won't reset it.
>> 
>>  v2: - Fix block->state not being reset in
>>        iio_dma_buffer_request_update() for output buffers.
>>      - Only update block->bytes_used once and add a comment about 
>> why we
>>        update it.
>>      - Add a comment about why we're setting a different state for 
>> output
>>        buffers in iio_dma_buffer_request_update()
>>      - Remove useless cast to bool (!!) in iio_dma_buffer_io()
>> 
>>  Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>  Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
> One comment inline.
> 
> I'd be tempted to queue this up with that fixed, but do we have
> any users?  Even though it's trivial I'm not that keen on code
> upstream well in advance of it being used.

There's a userspace user in libiio. On the kernel side, we do have
drivers that use it in ADI's downstream kernel, which we plan to
upstream in the long term (but it can take some time, as we need to
upstream other things first, like JESD204B support).
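
(For reference, once this lands the userspace side boils down to a
plain write() on the buffer character device - a rough sketch, where
the device path, sample format and prior sysfs configuration are all
assumptions:)

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Push samples to an already-configured, enabled IIO output buffer. */
static int push_samples(const int16_t *samples, size_t count)
{
	int fd = open("/dev/iio:device0", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;

	/* Ends up in iio_dma_buffer_write() via the buffer's .write callback. */
	ret = write(fd, samples, count * sizeof(*samples));
	close(fd);

	return ret < 0 ? -1 : 0;
}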

> 
>>  ---
>>   drivers/iio/buffer/industrialio-buffer-dma.c | 88 
>> ++++++++++++++++----
>>   include/linux/iio/buffer-dma.h               |  7 ++
>>   2 files changed, 79 insertions(+), 16 deletions(-)
>> 
>>  diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c 
>> b/drivers/iio/buffer/industrialio-buffer-dma.c
>>  index 1fc91467d1aa..a9f1b673374f 100644
>>  --- a/drivers/iio/buffer/industrialio-buffer-dma.c
>>  +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
>>  @@ -195,6 +195,18 @@ static void _iio_dma_buffer_block_done(struct 
>> iio_dma_buffer_block *block)
>>   		block->state = IIO_BLOCK_STATE_DONE;
>>   }
>> 
>>  +static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue 
>> *queue)
>>  +{
>>  +	__poll_t flags;
>>  +
>>  +	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
>>  +		flags = EPOLLIN | EPOLLRDNORM;
>>  +	else
>>  +		flags = EPOLLOUT | EPOLLWRNORM;
>>  +
>>  +	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
>>  +}
>>  +
>>   /**
>>    * iio_dma_buffer_block_done() - Indicate that a block has been 
>> completed
>>    * @block: The completed block
>>  @@ -212,7 +224,7 @@ void iio_dma_buffer_block_done(struct 
>> iio_dma_buffer_block *block)
>>   	spin_unlock_irqrestore(&queue->list_lock, flags);
>> 
>>   	iio_buffer_block_put_atomic(block);
>>  -	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | 
>> EPOLLRDNORM);
>>  +	iio_dma_buffer_queue_wake(queue);
>>   }
>>   EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
>> 
>>  @@ -241,7 +253,7 @@ void iio_dma_buffer_block_list_abort(struct 
>> iio_dma_buffer_queue *queue,
>>   	}
>>   	spin_unlock_irqrestore(&queue->list_lock, flags);
>> 
>>  -	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | 
>> EPOLLRDNORM);
>>  +	iio_dma_buffer_queue_wake(queue);
>>   }
>>   EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
>> 
>>  @@ -335,8 +347,24 @@ int iio_dma_buffer_request_update(struct 
>> iio_buffer *buffer)
>>   			queue->fileio.blocks[i] = block;
>>   		}
>> 
>>  -		block->state = IIO_BLOCK_STATE_QUEUED;
>>  -		list_add_tail(&block->head, &queue->incoming);
>>  +		/*
>>  +		 * block->bytes_used may have been modified previously, e.g. by
>>  +		 * iio_dma_buffer_block_list_abort(). Reset it here to the
>>  +		 * block's size so that iio_dma_buffer_io() will work.
>>  +		 */
>>  +		block->bytes_used = block->size;
>>  +
>>  +		/*
>>  +		 * If it's an input buffer, mark the block as queued, and
>>  +		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
>>  +		 * done, which means it's ready to be dequeued.
>>  +		 */
>>  +		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
>>  +			block->state = IIO_BLOCK_STATE_QUEUED;
>>  +			list_add_tail(&block->head, &queue->incoming);
>>  +		} else {
>>  +			block->state = IIO_BLOCK_STATE_DONE;
>>  +		}
>>   	}
>> 
>>   out_unlock:
>>  @@ -465,20 +493,12 @@ static struct iio_dma_buffer_block 
>> *iio_dma_buffer_dequeue(
>>   	return block;
>>   }
>> 
>>  -/**
>>  - * iio_dma_buffer_read() - DMA buffer read callback
>>  - * @buffer: Buffer to read form
>>  - * @n: Number of bytes to read
>>  - * @user_buffer: Userspace buffer to copy the data to
>>  - *
>>  - * Should be used as the read callback for iio_buffer_access_ops
>>  - * struct for DMA buffers.
>>  - */
>>  -int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>>  -	char __user *user_buffer)
>>  +static int iio_dma_buffer_io(struct iio_buffer *buffer,
>>  +			     size_t n, char __user *user_buffer, bool is_write)
>>   {
>>   	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
>>   	struct iio_dma_buffer_block *block;
>>  +	void *addr;
>>   	int ret;
>> 
>>   	if (n < buffer->bytes_per_datum)
>>  @@ -501,8 +521,13 @@ int iio_dma_buffer_read(struct iio_buffer 
>> *buffer, size_t n,
>>   	n = rounddown(n, buffer->bytes_per_datum);
>>   	if (n > block->bytes_used - queue->fileio.pos)
>>   		n = block->bytes_used - queue->fileio.pos;
>>  +	addr = block->vaddr + queue->fileio.pos;
>> 
>>  -	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, 
>> n)) {
>>  +	if (is_write)
>>  +		ret = copy_from_user(addr, user_buffer, n);
>>  +	else
>>  +		ret = copy_to_user(user_buffer, addr, n);
>>  +	if (ret) {
>>   		ret = -EFAULT;
>>   		goto out_unlock;
>>   	}
>>  @@ -521,8 +546,39 @@ int iio_dma_buffer_read(struct iio_buffer 
>> *buffer, size_t n,
>> 
>>   	return ret;
>>   }
>>  +
>>  +/**
>>  + * iio_dma_buffer_read() - DMA buffer read callback
>>  + * @buffer: Buffer to read form
>>  + * @n: Number of bytes to read
>>  + * @user_buffer: Userspace buffer to copy the data to
>>  + *
>>  + * Should be used as the read callback for iio_buffer_access_ops
>>  + * struct for DMA buffers.
>>  + */
>>  +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>>  +	char __user *user_buffer)
>>  +{
>>  +	return iio_dma_buffer_io(buffer, n, user_buffer, false);
>>  +}
>>   EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
>> 
>>  +/**
>>  + * iio_dma_buffer_write() - DMA buffer write callback
>>  + * @buffer: Buffer to write to
>>  + * @n: Number of bytes to write
>>  + * @user_buffer: Userspace buffer to copy the data from
>>  + *
>>  + * Should be used as the write callback for iio_buffer_access_ops
>>  + * struct for DMA buffers.
>>  + */
>>  +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
>>  +			 const char __user *user_buffer)
>>  +{
>>  +	return iio_dma_buffer_io(buffer, n, (__force char *)user_buffer, 
>> true);
> 
> Casting away the const is a little nasty.   Perhaps it's worth adding 
> a
> parameter to iio_dma_buffer_io so you can have different parameters
> for the read and write cases and hence keep the const in place?
> return iio_dma_buffer_io(buffer, n, NULL, user_buffer, true);
> and
> return iio_dma_buffer_io(buffer,n, user_buffer, NULL, false);

I can do that.

Cheers,
-Paul

>>  +}
>>  +EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
>>  +
>>   /**
>>    * iio_dma_buffer_data_available() - DMA buffer data_available 
>> callback
>>    * @buf: Buffer to check for data availability
>>  diff --git a/include/linux/iio/buffer-dma.h 
>> b/include/linux/iio/buffer-dma.h
>>  index 18d3702fa95d..490b93f76fa8 100644
>>  --- a/include/linux/iio/buffer-dma.h
>>  +++ b/include/linux/iio/buffer-dma.h
>>  @@ -132,6 +132,8 @@ int iio_dma_buffer_disable(struct iio_buffer 
>> *buffer,
>>   	struct iio_dev *indio_dev);
>>   int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
>>   	char __user *user_buffer);
>>  +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
>>  +			 const char __user *user_buffer);
>>   size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
>>   int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, 
>> size_t bpd);
>>   int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned 
>> int length);
>>  @@ -142,4 +144,9 @@ int iio_dma_buffer_init(struct 
>> iio_dma_buffer_queue *queue,
>>   void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
>>   void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
>> 
>>  +static inline size_t iio_dma_buffer_space_available(struct 
>> iio_buffer *buffer)
>>  +{
>>  +	return iio_dma_buffer_data_available(buffer);
>>  +}
>>  +
>>   #endif
>
Andy Shevchenko March 28, 2022, 8:38 p.m. UTC | #3
On Wed, Feb 9, 2022 at 9:10 AM Paul Cercueil <paul@crapouillou.net> wrote:
>
> Adding write support to the buffer-dma code is easy - the write()
> function basically needs to do the exact same thing as the read()
> function: dequeue a block, read or write the data, enqueue the block
> when entirely processed.
>
> Therefore, the iio_buffer_dma_read() and the new iio_buffer_dma_write()
> now both call a function iio_buffer_dma_io(), which will perform this
> task.
>
> The .space_available() callback can return the exact same value as the
> .data_available() callback for input buffers, since in both cases we
> count the exact same thing (the number of bytes in each available
> block).
>
> Note that we preemptively reset block->bytes_used to the buffer's size
> in iio_dma_buffer_request_update(), as in the future the
> iio_dma_buffer_enqueue() function won't reset it.

...

> v2: - Fix block->state not being reset in
>       iio_dma_buffer_request_update() for output buffers.
>     - Only update block->bytes_used once and add a comment about why we
>       update it.
>     - Add a comment about why we're setting a different state for output
>       buffers in iio_dma_buffer_request_update()
>     - Remove useless cast to bool (!!) in iio_dma_buffer_io()

The usual place for the changelog is after the cutter '---' line below...

> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
> ---

...somewhere here.
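
I.e. roughly this layout (trailers, then the '---' cutter, then the
version changelog, then the diffstat), so that the changelog is dropped
automatically when the patch is applied:

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
---
v2: - Fix block->state not being reset in
      iio_dma_buffer_request_update() for output buffers.
    [...]

 drivers/iio/buffer/industrialio-buffer-dma.c | 88 ++++++++++++++++----
 include/linux/iio/buffer-dma.h               |  7 ++
 2 files changed, 79 insertions(+), 16 deletions(-)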

>  drivers/iio/buffer/industrialio-buffer-dma.c | 88 ++++++++++++++++----
>  include/linux/iio/buffer-dma.h               |  7 ++

...

> +static int iio_dma_buffer_io(struct iio_buffer *buffer,
> +                            size_t n, char __user *user_buffer, bool is_write)

I believe there is room for size_t n on the previous line.

...

> +       if (is_write)

I would name it is_from_user.

> +               ret = copy_from_user(addr, user_buffer, n);
> +       else
> +               ret = copy_to_user(user_buffer, addr, n);

...

> +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
> +                        const char __user *user_buffer)
> +{
> +       return iio_dma_buffer_io(buffer, n, (__force char *)user_buffer, true);

Why do you drop the address space markers?

> +}
Nuno Sá March 29, 2022, 7:11 a.m. UTC | #4
On Mon, 2022-03-28 at 19:39 +0100, Paul Cercueil wrote:
> Hi Jonathan,
> 
> Le lun., mars 28 2022 at 18:24:09 +0100, Jonathan Cameron 
> <jic23@kernel.org> a écrit :
> > On Mon,  7 Feb 2022 12:59:23 +0000
> > Paul Cercueil <paul@crapouillou.net> wrote:
> > 
> > >  Adding write support to the buffer-dma code is easy - the
> > > write()
> > >  function basically needs to do the exact same thing as the
> > > read()
> > >  function: dequeue a block, read or write the data, enqueue the
> > > block
> > >  when entirely processed.
> > > 
> > >  Therefore, the iio_buffer_dma_read() and the new 
> > > iio_buffer_dma_write()
> > >  now both call a function iio_buffer_dma_io(), which will perform
> > > this
> > >  task.
> > > 
> > >  The .space_available() callback can return the exact same value
> > > as 
> > > the
> > >  .data_available() callback for input buffers, since in both
> > > cases we
> > >  count the exact same thing (the number of bytes in each
> > > available
> > >  block).
> > > 
> > >  Note that we preemptively reset block->bytes_used to the
> > > buffer's 
> > > size
> > >  in iio_dma_buffer_request_update(), as in the future the
> > >  iio_dma_buffer_enqueue() function won't reset it.
> > > 
> > >  v2: - Fix block->state not being reset in
> > >        iio_dma_buffer_request_update() for output buffers.
> > >      - Only update block->bytes_used once and add a comment about
> > > why we
> > >        update it.
> > >      - Add a comment about why we're setting a different state
> > > for 
> > > output
> > >        buffers in iio_dma_buffer_request_update()
> > >      - Remove useless cast to bool (!!) in iio_dma_buffer_io()
> > > 
> > >  Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> > >  Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
> > One comment inline.
> > 
> > I'd be tempted to queue this up with that fixed, but do we have
> > any users?  Even though it's trivial I'm not that keen on code
> > upstream well in advance of it being used.
> 
> There's a userspace user in libiio. On the kernel side we do have 
> drivers that use it in ADI's downstream kernel, that we plan to 
> upstream in the long term (but it can take some time, as we need to 
> upstream other things first, like JESD204B support).
> 
> 

You mean users of DMA output buffers? If so, I have it on my queue to
add the DAC counterpart of this one:

https://elixir.bootlin.com/linux/latest/source/drivers/iio/adc/adi-axi-adc.c

That driver is a user of DMA buffers. Though this one does not depend on
JESD204, I suspect it will also be a tricky process, mainly because I
think there are major issues with how things are done right now (in the
ADC driver).

But yeah, that's not a topic for here; I do plan to first start the
discussion on the mailing list before starting development (hopefully
in the coming weeks)...

- Nuno Sá

Patch

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 1fc91467d1aa..a9f1b673374f 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -195,6 +195,18 @@  static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 		block->state = IIO_BLOCK_STATE_DONE;
 }
 
+static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
+{
+	__poll_t flags;
+
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+		flags = EPOLLIN | EPOLLRDNORM;
+	else
+		flags = EPOLLOUT | EPOLLWRNORM;
+
+	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
+}
+
 /**
  * iio_dma_buffer_block_done() - Indicate that a block has been completed
  * @block: The completed block
@@ -212,7 +224,7 @@  void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
 	iio_buffer_block_put_atomic(block);
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	iio_dma_buffer_queue_wake(queue);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
 
@@ -241,7 +253,7 @@  void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	iio_dma_buffer_queue_wake(queue);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
 
@@ -335,8 +347,24 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 			queue->fileio.blocks[i] = block;
 		}
 
-		block->state = IIO_BLOCK_STATE_QUEUED;
-		list_add_tail(&block->head, &queue->incoming);
+		/*
+		 * block->bytes_used may have been modified previously, e.g. by
+		 * iio_dma_buffer_block_list_abort(). Reset it here to the
+		 * block's size so that iio_dma_buffer_io() will work.
+		 */
+		block->bytes_used = block->size;
+
+		/*
+		 * If it's an input buffer, mark the block as queued, and
+		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
+		 * done, which means it's ready to be dequeued.
+		 */
+		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+			block->state = IIO_BLOCK_STATE_QUEUED;
+			list_add_tail(&block->head, &queue->incoming);
+		} else {
+			block->state = IIO_BLOCK_STATE_DONE;
+		}
 	}
 
 out_unlock:
@@ -465,20 +493,12 @@  static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
 	return block;
 }
 
-/**
- * iio_dma_buffer_read() - DMA buffer read callback
- * @buffer: Buffer to read form
- * @n: Number of bytes to read
- * @user_buffer: Userspace buffer to copy the data to
- *
- * Should be used as the read callback for iio_buffer_access_ops
- * struct for DMA buffers.
- */
-int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
-	char __user *user_buffer)
+static int iio_dma_buffer_io(struct iio_buffer *buffer,
+			     size_t n, char __user *user_buffer, bool is_write)
 {
 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
 	struct iio_dma_buffer_block *block;
+	void *addr;
 	int ret;
 
 	if (n < buffer->bytes_per_datum)
@@ -501,8 +521,13 @@  int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 	n = rounddown(n, buffer->bytes_per_datum);
 	if (n > block->bytes_used - queue->fileio.pos)
 		n = block->bytes_used - queue->fileio.pos;
+	addr = block->vaddr + queue->fileio.pos;
 
-	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
+	if (is_write)
+		ret = copy_from_user(addr, user_buffer, n);
+	else
+		ret = copy_to_user(user_buffer, addr, n);
+	if (ret) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}
@@ -521,8 +546,39 @@  int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 
 	return ret;
 }
+
+/**
+ * iio_dma_buffer_read() - DMA buffer read callback
+ * @buffer: Buffer to read form
+ * @n: Number of bytes to read
+ * @user_buffer: Userspace buffer to copy the data to
+ *
+ * Should be used as the read callback for iio_buffer_access_ops
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
+	char __user *user_buffer)
+{
+	return iio_dma_buffer_io(buffer, n, user_buffer, false);
+}
 EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
 
+/**
+ * iio_dma_buffer_write() - DMA buffer write callback
+ * @buffer: Buffer to write to
+ * @n: Number of bytes to write
+ * @user_buffer: Userspace buffer to copy the data from
+ *
+ * Should be used as the write callback for iio_buffer_access_ops
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+			 const char __user *user_buffer)
+{
+	return iio_dma_buffer_io(buffer, n, (__force char *)user_buffer, true);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
+
 /**
  * iio_dma_buffer_data_available() - DMA buffer data_available callback
  * @buf: Buffer to check for data availability
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 18d3702fa95d..490b93f76fa8 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -132,6 +132,8 @@  int iio_dma_buffer_disable(struct iio_buffer *buffer,
 	struct iio_dev *indio_dev);
 int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 	char __user *user_buffer);
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+			 const char __user *user_buffer);
 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
 int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
@@ -142,4 +144,9 @@  int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
 
+static inline size_t iio_dma_buffer_space_available(struct iio_buffer *buffer)
+{
+	return iio_dma_buffer_data_available(buffer);
+}
+
 #endif