
[01/15] iio: buffer-dma: Get rid of incoming/outgoing queues

Message ID 20211115141925.60164-2-paul@crapouillou.net (mailing list archive)
State Changes Requested
Series iio: buffer-dma: write() and new DMABUF based API

Commit Message

Paul Cercueil Nov. 15, 2021, 2:19 p.m. UTC
The buffer-dma code was using two queues, incoming and outgoing, to
manage the state of the blocks in use.

While this totally works, it adds some complexity to the code,
especially since the code only manages 2 blocks. It is much easier to
just check each block's state manually, and keep a counter for the next
block to dequeue.

Since the new DMABUF based API wouldn't use these incoming and outgoing
queues anyway, getting rid of them now makes the upcoming changes
simpler.

Signed-off-by: Paul Cercueil <paul@crapouillou.net>
---
 drivers/iio/buffer/industrialio-buffer-dma.c | 68 ++++++++++----------
 include/linux/iio/buffer-dma.h               |  7 +-
 2 files changed, 37 insertions(+), 38 deletions(-)
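
As an illustration of the approach, here is a small standalone C model (not the driver code; the state names and structures below are simplified stand-ins for the IIO_BLOCK_STATE_* values and struct iio_dma_buffer_queue_fileio): once each block carries its own state, loops such as the data_available accounting can simply walk the fixed two-entry fileio array instead of an outgoing list.

#include <stddef.h>
#include <stdio.h>

/* Standalone toy model, not kernel code: each block carries its own
 * state, so no incoming/outgoing lists are needed. */
enum blk_state { BLK_QUEUED, BLK_ACTIVE, BLK_DONE, BLK_DEQUEUED, BLK_DEAD };

struct blk {
	enum blk_state state;
	size_t size;
};

struct fileio {
	struct blk blocks[2];		/* mirrors queue->fileio.blocks[2] */
	struct blk *active_block;
	unsigned int next_dequeue;	/* index of the next block to hand out */
};

/* Counterpart of the reworked data_available loop: walk the fixed array
 * and sum the completed blocks instead of walking an outgoing list. */
static size_t data_available(const struct fileio *f)
{
	size_t avail = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		const struct blk *b = &f->blocks[i];

		if (b != f->active_block && b->state == BLK_DONE)
			avail += b->size;
	}

	return avail;
}

int main(void)
{
	struct fileio f = {
		.blocks = { { BLK_DONE, 4096 }, { BLK_QUEUED, 4096 } },
	};

	printf("%zu bytes available\n", data_available(&f)); /* prints 4096 */
	return 0;
}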

Comments

Alexandru Ardelean Nov. 16, 2021, 8:23 a.m. UTC | #1
On Mon, Nov 15, 2021 at 4:19 PM Paul Cercueil <paul@crapouillou.net> wrote:
>
> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
>
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
>
> Since the new DMABUF based API wouldn't use these incoming and outgoing
> queues anyway, getting rid of them now makes the upcoming changes
> simpler.
>

Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>

Jonathan Cameron Nov. 21, 2021, 2:05 p.m. UTC | #2
On Mon, 15 Nov 2021 14:19:11 +0000
Paul Cercueil <paul@crapouillou.net> wrote:

> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
> 
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
> 
> Since the new DMABUF based API wouldn't use these incoming and outgoing
> queues anyway, getting rid of them now makes the upcoming changes
> simpler.
> 
> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
This looks to be a nice little cleanup on its own.

Let me know if you'd like me to pick this up before the main discussion
comes to any conclusion.

Jonathan

Lars-Peter Clausen Nov. 21, 2021, 4:23 p.m. UTC | #3
On 11/15/21 3:19 PM, Paul Cercueil wrote:
> The buffer-dma code was using two queues, incoming and outgoing, to
> manage the state of the blocks in use.
>
> While this totally works, it adds some complexity to the code,
> especially since the code only manages 2 blocks. It is much easier to
> just check each block's state manually, and keep a counter for the next
> block to dequeue.
>
> Since the new DMABUF based API wouldn't use these incoming and outgoing
> queues anyway, getting rid of them now makes the upcoming changes
> simpler.
>
> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
The outgoing queue is going to be replaced by fences, but I think we 
need to keep the incoming queue.
> [...]
> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
>   	struct iio_dma_buffer_block *block)
>   {
> -	if (block->state == IIO_BLOCK_STATE_DEAD) {
> +	if (block->state == IIO_BLOCK_STATE_DEAD)
>   		iio_buffer_block_put(block);
> -	} else if (queue->active) {
> +	else if (queue->active)
>   		iio_dma_buffer_submit_block(queue, block);
> -	} else {
> +	else
>   		block->state = IIO_BLOCK_STATE_QUEUED;
> -		list_add_tail(&block->head, &queue->incoming);
If iio_dma_buffer_enqueue() is called with a dmabuf and the buffer is 
not active, it will be marked as queued, but we don't actually keep a 
reference to it anywhere. It will never be submitted to the DMA, and it 
will never be signaled as completed.
Paul Cercueil Nov. 21, 2021, 5:52 p.m. UTC | #4
Hi Lars,

On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
<lars@metafoo.de> wrote:
> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>> The buffer-dma code was using two queues, incoming and outgoing, to
>> manage the state of the blocks in use.
>> 
>> While this totally works, it adds some complexity to the code,
>> especially since the code only manages 2 blocks. It is much easier to
>> just check each block's state manually, and keep a counter for the 
>> next
>> block to dequeue.
>> 
>> Since the new DMABUF based API wouldn't use these incoming and 
>> outgoing
>> queues anyway, getting rid of them now makes the upcoming changes
>> simpler.
>> 
>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
> The outgoing queue is going to be replaced by fences, but I think we 
> need to keep the incoming queue.

Blocks are always accessed in sequential order, so we now have a 
"queue->next_dequeue" that cycles between the buffers allocated for 
fileio.
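
To illustrate (a standalone toy model, not the driver code; the names are simplified stand-ins): because the cursor only advances when the block it points at is done, blocks are handed back strictly in submission order even if a later block happens to complete first.

#include <stdio.h>

enum state { ST_QUEUED, ST_DONE, ST_DEQUEUED };

struct model {
	enum state blocks[2];		/* stand-in for fileio.blocks[2] */
	unsigned int next_dequeue;	/* cursor cycling over the array */
};

/* Returns the index handed back to the reader, or -1 if the block at
 * the cursor has not completed yet. */
static int dequeue_idx(struct model *m)
{
	unsigned int idx = m->next_dequeue;

	if (m->blocks[idx] != ST_DONE)
		return -1;

	m->blocks[idx] = ST_DEQUEUED;
	m->next_dequeue = (idx + 1) % 2;	/* wrap over the two blocks */
	return (int)idx;
}

int main(void)
{
	/* Block 1 finished first, block 0 is still in flight. */
	struct model m = { { ST_QUEUED, ST_DONE }, 0 };

	printf("%d\n", dequeue_idx(&m));	/* -1: keep sequential order */
	m.blocks[0] = ST_DONE;
	printf("%d\n", dequeue_idx(&m));	/* 0 */
	printf("%d\n", dequeue_idx(&m));	/* 1 */
	return 0;
}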

>> [...]
>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>> *queue,
>>   	struct iio_dma_buffer_block *block)
>>   {
>> -	if (block->state == IIO_BLOCK_STATE_DEAD) {
>> +	if (block->state == IIO_BLOCK_STATE_DEAD)
>>   		iio_buffer_block_put(block);
>> -	} else if (queue->active) {
>> +	else if (queue->active)
>>   		iio_dma_buffer_submit_block(queue, block);
>> -	} else {
>> +	else
>>   		block->state = IIO_BLOCK_STATE_QUEUED;
>> -		list_add_tail(&block->head, &queue->incoming);
> If iio_dma_buffer_enqueue() is called with a dmabuf and the buffer is 
> not active, it will be marked as queued, but we don't actually keep a 
> reference to it anywhere. It will never be submitted to the DMA, and 
> it will never be signaled as completed.

We do keep a reference to the buffers, in the queue->fileio.blocks 
array. When the buffer is enabled, all the blocks in that array that 
are in the "queued" state will be submitted to the DMA.
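
A rough standalone sketch of that behaviour (toy model only; submit() stands in for iio_dma_buffer_submit_block() and the structures are simplified):

#include <stdio.h>

enum state { ST_QUEUED, ST_ACTIVE, ST_DONE };

struct block {
	enum state state;
	int id;
};

struct queue {
	struct block blocks[2];	/* long-lived references, like fileio.blocks[] */
	int active;
	unsigned int next_dequeue;
};

/* Stand-in for iio_dma_buffer_submit_block(). */
static void submit(struct block *b)
{
	b->state = ST_ACTIVE;
	printf("submitted block %d\n", b->id);
}

/* Model of the reworked enable path: no incoming list to drain, just
 * submit every block that is still in the queued state. */
static void enable(struct queue *q)
{
	unsigned int i;

	q->active = 1;
	q->next_dequeue = 0;

	for (i = 0; i < 2; i++) {
		if (q->blocks[i].state == ST_QUEUED)
			submit(&q->blocks[i]);
	}
}

int main(void)
{
	struct queue q = { { { ST_QUEUED, 0 }, { ST_QUEUED, 1 } }, 0, 0 };

	enable(&q);	/* submits block 0 and block 1 */
	return 0;
}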

Cheers,
-Paul
Lars-Peter Clausen Nov. 21, 2021, 6:49 p.m. UTC | #5
On 11/21/21 6:52 PM, Paul Cercueil wrote:
> Hi Lars,
>
> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
> <lars@metafoo.de> wrote:
>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>> The buffer-dma code was using two queues, incoming and outgoing, to
>>> manage the state of the blocks in use.
>>>
>>> While this totally works, it adds some complexity to the code,
>>> especially since the code only manages 2 blocks. It is much easier to
>>> just check each block's state manually, and keep a counter for the next
>>> block to dequeue.
>>>
>>> Since the new DMABUF based API wouldn't use these incoming and outgoing
>>> queues anyway, getting rid of them now makes the upcoming changes
>>> simpler.
>>>
>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>> The outgoing queue is going to be replaced by fences, but I think we 
>> need to keep the incoming queue.
>
> Blocks are always accessed in sequential order, so we now have a 
> "queue->next_dequeue" that cycles between the buffers allocated for 
> fileio.
>
>>> [...]
>>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>>> *queue,
>>>       struct iio_dma_buffer_block *block)
>>>   {
>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>           iio_buffer_block_put(block);
>>> -    } else if (queue->active) {
>>> +    else if (queue->active)
>>>           iio_dma_buffer_submit_block(queue, block);
>>> -    } else {
>>> +    else
>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>> -        list_add_tail(&block->head, &queue->incoming);
>> If iio_dma_buffer_enqueue() is called with a dmabuf and the buffer is 
>> not active, it will be marked as queued, but we don't actually keep a 
>> reference to it anywhere. It will never be submitted to the DMA, and 
>> it will never be signaled as completed.
>
> We do keep a reference to the buffers, in the queue->fileio.blocks 
> array. When the buffer is enabled, all the blocks in that array that 
> are in the "queued" state will be submitted to the DMA.
>
But not when used in combination with the DMA buf changes later in this 
series.
Paul Cercueil Nov. 21, 2021, 8:08 p.m. UTC | #6
On Sun, Nov 21 2021 at 19:49:03 +0100, Lars-Peter Clausen
<lars@metafoo.de> wrote:
> On 11/21/21 6:52 PM, Paul Cercueil wrote:
>> Hi Lars,
>> 
>> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
>> <lars@metafoo.de> wrote:
>>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>>> The buffer-dma code was using two queues, incoming and outgoing, to
>>>> manage the state of the blocks in use.
>>>> 
>>>> While this totally works, it adds some complexity to the code,
>>>> especially since the code only manages 2 blocks. It is much easier 
>>>> to
>>>> just check each block's state manually, and keep a counter for the 
>>>> next
>>>> block to dequeue.
>>>> 
>>>> Since the new DMABUF based API wouldn't use these incoming and 
>>>> outgoing
>>>> queues anyway, getting rid of them now makes the upcoming changes
>>>> simpler.
>>>> 
>>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>> The outgoing queue is going to be replaced by fences, but I think 
>>> we need to keep the incoming queue.
>> 
>> Blocks are always accessed in sequential order, so we now have a 
>> "queue->next_dequeue" that cycles between the buffers allocated for 
>> fileio.
>> 
>>>> [...]
>>>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>>>> *queue,
>>>>       struct iio_dma_buffer_block *block)
>>>>   {
>>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>>           iio_buffer_block_put(block);
>>>> -    } else if (queue->active) {
>>>> +    else if (queue->active)
>>>>           iio_dma_buffer_submit_block(queue, block);
>>>> -    } else {
>>>> +    else
>>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>>> -        list_add_tail(&block->head, &queue->incoming);
>>> If iio_dma_buffer_enqueue() is called with a dmabuf and the buffer 
>>> is not active, it will be marked as queued, but we don't actually 
>>> keep a reference to it anywhere. It will never be submitted to 
>>> the DMA, and it will never be signaled as completed.
>> 
>> We do keep a reference to the buffers, in the queue->fileio.blocks 
>> array. When the buffer is enabled, all the blocks in that array 
>> that are in the "queued" state will be submitted to the DMA.
>> 
> But not when used in combination with the DMA buf changes later in 
> this series.
> 

That's still the case after the DMABUF changes of the series. Or can 
you point me exactly what you think is broken?

-Paul
Lars-Peter Clausen Nov. 22, 2021, 3:08 p.m. UTC | #7
On 11/21/21 9:08 PM, Paul Cercueil wrote:
>
>
> On Sun, Nov 21 2021 at 19:49:03 +0100, Lars-Peter Clausen
> <lars@metafoo.de> wrote:
>> On 11/21/21 6:52 PM, Paul Cercueil wrote:
>>> Hi Lars,
>>>
>>> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
>>> <lars@metafoo.de> wrote:
>>>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>>>> The buffer-dma code was using two queues, incoming and outgoing, to
>>>>> manage the state of the blocks in use.
>>>>>
>>>>> While this totally works, it adds some complexity to the code,
>>>>> especially since the code only manages 2 blocks. It is much easier to
>>>>> just check each block's state manually, and keep a counter for the 
>>>>> next
>>>>> block to dequeue.
>>>>>
>>>>> Since the new DMABUF based API wouldn't use these incoming and 
>>>>> outgoing
>>>>> queues anyway, getting rid of them now makes the upcoming changes
>>>>> simpler.
>>>>>
>>>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>>> The outgoing queue is going to be replaced by fences, but I think 
>>>> we need to keep the incoming queue.
>>>
>>> Blocks are always accessed in sequential order, so we now have a 
>>> "queue->next_dequeue" that cycles between the buffers allocated for 
>>> fileio.
>>>
>>>>> [...]
>>>>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>>>>> *queue,
>>>>>       struct iio_dma_buffer_block *block)
>>>>>   {
>>>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>>>           iio_buffer_block_put(block);
>>>>> -    } else if (queue->active) {
>>>>> +    else if (queue->active)
>>>>>           iio_dma_buffer_submit_block(queue, block);
>>>>> -    } else {
>>>>> +    else
>>>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>>>> -        list_add_tail(&block->head, &queue->incoming);
>>>> If iio_dma_buffer_enqueue() is called with a dmabuf and the buffer 
>>>> is not active, it will be marked as queued, but we don't actually 
>>>> keep a reference to it anywhere. It will never be submitted to 
>>>> the DMA, and it will never be signaled as completed.
>>>
>>> We do keep a reference to the buffers, in the queue->fileio.blocks 
>>> array. When the buffer is enabled, all the blocks in that array 
>>> that are in the "queued" state will be submitted to the DMA.
>>>
>> But not when used in combination with the DMA buf changes later in 
>> this series.
>>
>
> That's still the case after the DMABUF changes of the series. Or can 
> you point me exactly what you think is broken?
>
When you allocate a DMABUF with the allocate IOCTL and then submit it 
with the enqueue IOCTL before the buffer is enabled it will end up 
marked as queued, but not actually be queued anywhere.
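
A standalone sketch of the scenario being described (toy model only; it assumes, per this discussion, that a block enqueued through the DMABUF path is not referenced from fileio.blocks[], which is an assumption about the later patches rather than something shown in this one):

#include <stdio.h>

enum state { ST_NEW, ST_QUEUED, ST_ACTIVE };

struct block {
	enum state state;
};

struct queue {
	int active;
	struct block *fileio_blocks[2];	/* the only blocks enable() walks */
};

/* Toy version of the enqueue path: when the queue is not active the
 * block is merely marked as queued. */
static void enqueue(struct queue *q, struct block *b)
{
	if (q->active)
		b->state = ST_ACTIVE;	/* submitted to the DMA */
	else
		b->state = ST_QUEUED;	/* nothing else remembers this block */
}

/* Toy version of the enable path: it only walks the fileio blocks. */
static void enable(struct queue *q)
{
	unsigned int i;

	q->active = 1;
	for (i = 0; i < 2; i++)
		if (q->fileio_blocks[i]->state == ST_QUEUED)
			q->fileio_blocks[i]->state = ST_ACTIVE;
}

int main(void)
{
	struct block f0 = { ST_QUEUED }, f1 = { ST_QUEUED };
	struct block dmabuf_blk = { ST_NEW };
	struct queue q = { 0, { &f0, &f1 } };

	enqueue(&q, &dmabuf_blk);	/* enqueued before the buffer is enabled */
	enable(&q);			/* submits f0 and f1, never sees dmabuf_blk */

	/* dmabuf_blk stays queued forever: no list or array references it. */
	printf("dmabuf block state: %d\n", dmabuf_blk.state);	/* 1 == ST_QUEUED */
	return 0;
}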
Paul Cercueil Nov. 22, 2021, 3:16 p.m. UTC | #8
Hi Lars,

On Mon, Nov 22 2021 at 16:08:51 +0100, Lars-Peter Clausen
<lars@metafoo.de> wrote:
> On 11/21/21 9:08 PM, Paul Cercueil wrote:
>> 
>> 
>> On Sun, Nov 21 2021 at 19:49:03 +0100, Lars-Peter Clausen
>> <lars@metafoo.de> wrote:
>>> On 11/21/21 6:52 PM, Paul Cercueil wrote:
>>>> Hi Lars,
>>>> 
>>>> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
>>>> <lars@metafoo.de> wrote:
>>>>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>>>>> The buffer-dma code was using two queues, incoming and outgoing, 
>>>>>> to
>>>>>> manage the state of the blocks in use.
>>>>>> 
>>>>>> While this totally works, it adds some complexity to the code,
>>>>>> especially since the code only manages 2 blocks. It is much 
>>>>>> easier to
>>>>>> just check each block's state manually, and keep a counter for 
>>>>>> the next
>>>>>> block to dequeue.
>>>>>> 
>>>>>> Since the new DMABUF based API wouldn't use these incoming and 
>>>>>> outgoing
>>>>>> queues anyway, getting rid of them now makes the upcoming changes
>>>>>> simpler.
>>>>>> 
>>>>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>>>> The outgoing queue is going to be replaced by fences, but I think 
>>>>> we need to keep the incoming queue.
>>>> 
>>>> Blocks are always accessed in sequential order, so we now have a 
>>>> "queue->next_dequeue" that cycles between the buffers 
>>>> allocated for fileio.
>>>> 
>>>>>> [...]
>>>>>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>>>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>>>>>> *queue,
>>>>>>       struct iio_dma_buffer_block *block)
>>>>>>   {
>>>>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>>>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>>>>           iio_buffer_block_put(block);
>>>>>> -    } else if (queue->active) {
>>>>>> +    else if (queue->active)
>>>>>>           iio_dma_buffer_submit_block(queue, block);
>>>>>> -    } else {
>>>>>> +    else
>>>>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>>>>> -        list_add_tail(&block->head, &queue->incoming);
>>>>> If iio_dma_buffer_enqueue() is called with a dmabuf and the 
>>>>> buffer is not active, it will be marked as queued, but we 
>>>>> don't actually keep a reference to it anywhere. It will 
>>>>> never be submitted to the DMA, and it will never be 
>>>>> signaled as completed.
>>>> 
>>>> We do keep a reference to the buffers, in the queue->fileio.blocks 
>>>> array. When the buffer is enabled, all the blocks in that 
>>>> array that are in the "queued" state will be submitted to the 
>>>> DMA.
>>>> 
>>> But not when used in combination with the DMA buf changes later in 
>>> this series.
>>> 
>> 
>> That's still the case after the DMABUF changes of the series. Or can 
>> you point me exactly what you think is broken?
>> 
> When you allocate a DMABUF with the allocate IOCTL and then submit it 
> with the enqueue IOCTL before the buffer is enabled it will end up 
> marked as queued, but not actually be queued anywhere.
> 

Ok, it works for me because I never enqueue blocks before enabling the 
buffer. I can add a requirement that blocks must be enqueued only after 
the buffer is enabled.

Cheers,
-Paul
Lars-Peter Clausen Nov. 22, 2021, 3:17 p.m. UTC | #9
On 11/22/21 4:16 PM, Paul Cercueil wrote:
> Hi Lars,
>
> On Mon, Nov 22 2021 at 16:08:51 +0100, Lars-Peter Clausen
> <lars@metafoo.de> wrote:
>> On 11/21/21 9:08 PM, Paul Cercueil wrote:
>>>
>>>
>>> On Sun, Nov 21 2021 at 19:49:03 +0100, Lars-Peter Clausen
>>> <lars@metafoo.de> wrote:
>>>> On 11/21/21 6:52 PM, Paul Cercueil wrote:
>>>>> Hi Lars,
>>>>>
>>>>> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
>>>>> <lars@metafoo.de> wrote:
>>>>>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>>>>>> The buffer-dma code was using two queues, incoming and outgoing, to
>>>>>>> manage the state of the blocks in use.
>>>>>>>
>>>>>>> While this totally works, it adds some complexity to the code,
>>>>>>> especially since the code only manages 2 blocks. It is much 
>>>>>>> easier to
>>>>>>> just check each block's state manually, and keep a counter for 
>>>>>>> the next
>>>>>>> block to dequeue.
>>>>>>>
>>>>>>> Since the new DMABUF based API wouldn't use these incoming and 
>>>>>>> outgoing
>>>>>>> queues anyway, getting rid of them now makes the upcoming changes
>>>>>>> simpler.
>>>>>>>
>>>>>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>>>>> The outgoing queue is going to be replaced by fences, but I think 
>>>>>> we need to keep the incoming queue.
>>>>>
>>>>> Blocks are always accessed in sequential order, so we now have a 
>>>>> "queue->next_dequeue" that cycles between the buffers 
>>>>> allocated for fileio.
>>>>>
>>>>>>> [...]
>>>>>>> @@ -442,28 +435,33 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>>>>>   static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue 
>>>>>>> *queue,
>>>>>>>       struct iio_dma_buffer_block *block)
>>>>>>>   {
>>>>>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>>>>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>>>>>           iio_buffer_block_put(block);
>>>>>>> -    } else if (queue->active) {
>>>>>>> +    else if (queue->active)
>>>>>>>           iio_dma_buffer_submit_block(queue, block);
>>>>>>> -    } else {
>>>>>>> +    else
>>>>>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>>>>>> -        list_add_tail(&block->head, &queue->incoming);
>>>>>> If iio_dma_buffer_enqueue() is called with a dmabuf and the 
>>>>>> buffer is not active, it will be marked as queued, but we 
>>>>>> don't actually keep a reference to it anywhere. It will 
>>>>>> never be submitted to the DMA, and it will never be 
>>>>>> signaled as completed.
>>>>>
>>>>> We do keep a reference to the buffers, in the queue->fileio.blocks 
>>>>> array. When the buffer is enabled, all the blocks in that 
>>>>> array that are in the "queued" state will be submitted to the 
>>>>> DMA.
>>>>>
>>>> But not when used in combination with the DMA buf changes later in 
>>>> this series.
>>>>
>>>
>>> That's still the case after the DMABUF changes of the series. Or can 
>>> you point me exactly what you think is broken?
>>>
>> When you allocate a DMABUF with the allocate IOCTL and then submit it 
>> with the enqueue IOCTL before the buffer is enabled it will end up 
>> marked as queued, but not actually be queued anywhere.
>>
>
> Ok, it works for me because I never enqueue blocks before enabling the 
> buffer. I can add a requirement that blocks must be enqueued only 
> after the buffer is enabled.

I don't think that is a good idea. This way you are going to potentially 
drop data at the beginning of your stream when the DMA isn't ready yet.
Paul Cercueil Nov. 22, 2021, 3:27 p.m. UTC | #10
On Mon, Nov 22 2021 at 16:17:59 +0100, Lars-Peter Clausen
<lars@metafoo.de> wrote:
> On 11/22/21 4:16 PM, Paul Cercueil wrote:
>> Hi Lars,
>> 
>> On Mon, Nov 22 2021 at 16:08:51 +0100, Lars-Peter Clausen
>> <lars@metafoo.de> wrote:
>>> On 11/21/21 9:08 PM, Paul Cercueil wrote:
>>>> 
>>>> 
>>>> On Sun, Nov 21 2021 at 19:49:03 +0100, Lars-Peter Clausen
>>>> <lars@metafoo.de> wrote:
>>>>> On 11/21/21 6:52 PM, Paul Cercueil wrote:
>>>>>> Hi Lars,
>>>>>> 
>>>>>> On Sun, Nov 21 2021 at 17:23:35 +0100, Lars-Peter Clausen
>>>>>> <lars@metafoo.de> wrote:
>>>>>>> On 11/15/21 3:19 PM, Paul Cercueil wrote:
>>>>>>>> The buffer-dma code was using two queues, incoming and 
>>>>>>>> outgoing, to
>>>>>>>> manage the state of the blocks in use.
>>>>>>>> 
>>>>>>>> While this totally works, it adds some complexity to the code,
>>>>>>>> especially since the code only manages 2 blocks. It is much 
>>>>>>>> easier to
>>>>>>>> just check each block's state manually, and keep a counter for 
>>>>>>>> the next
>>>>>>>> block to dequeue.
>>>>>>>> 
>>>>>>>> Since the new DMABUF based API wouldn't use these incoming and 
>>>>>>>> outgoing
>>>>>>>> queues anyway, getting rid of them now makes the upcoming 
>>>>>>>> changes
>>>>>>>> simpler.
>>>>>>>> 
>>>>>>>> Signed-off-by: Paul Cercueil <paul@crapouillou.net>
>>>>>>> The outgoing queue is going to be replaced by fences, but I 
>>>>>>> think we need to keep the incoming queue.
>>>>>> 
>>>>>> Blocks are always accessed in sequential order, so we now have a 
>>>>>> "queue->next_dequeue" that cycles between the buffers 
>>>>>> allocated for fileio.
>>>>>> 
>>>>>>>> [...]
>>>>>>>> @@ -442,28 +435,33 @@ 
>>>>>>>> EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
>>>>>>>>   static void iio_dma_buffer_enqueue(struct 
>>>>>>>> iio_dma_buffer_queue *queue,
>>>>>>>>       struct iio_dma_buffer_block *block)
>>>>>>>>   {
>>>>>>>> -    if (block->state == IIO_BLOCK_STATE_DEAD) {
>>>>>>>> +    if (block->state == IIO_BLOCK_STATE_DEAD)
>>>>>>>>           iio_buffer_block_put(block);
>>>>>>>> -    } else if (queue->active) {
>>>>>>>> +    else if (queue->active)
>>>>>>>>           iio_dma_buffer_submit_block(queue, block);
>>>>>>>> -    } else {
>>>>>>>> +    else
>>>>>>>>           block->state = IIO_BLOCK_STATE_QUEUED;
>>>>>>>> -        list_add_tail(&block->head, &queue->incoming);
>>>>>>> If iio_dma_buffer_enqueue() is called with a dmabuf and the 
>>>>>>> buffer is not active, it will be marked as queued, 
>>>>>>> but we don't actually keep a reference to it 
>>>>>>> anywhere. It will never be submitted to the DMA, and 
>>>>>>> it will never be signaled as completed.
>>>>>> 
>>>>>> We do keep a reference to the buffers, in the 
>>>>>> queue->fileio.blocks array. When the buffer is enabled, 
>>>>>> all the blocks in that array that are in the "queued" 
>>>>>> state will be submitted to the DMA.
>>>>>> 
>>>>> But not when used in combination with the DMA buf changes later 
>>>>> in this series.
>>>>> 
>>>> 
>>>> That's still the case after the DMABUF changes of the series. Or 
>>>> can you point me exactly what you think is broken?
>>>> 
>>> When you allocate a DMABUF with the allocate IOCTL and then submit 
>>> it with the enqueue IOCTL before the buffer is enabled it will 
>>> end up marked as queued, but not actually be queued anywhere.
>>> 
>> 
>> Ok, it works for me because I never enqueue blocks before enabling 
>> the buffer. I can add a requirement that blocks must be enqueued 
>> only after the buffer is enabled.
> 
> I don't think that is a good idea. This way you are going to 
> potentially drop data at the beginning of your stream when the DMA 
> isn't ready yet.
> 

You wouldn't drop data, but it could cause an underrun, yes. Is it such 
a big deal, knowing that the buffer was just enabled? I don't think you 
can disable then enable the buffer without causing a discontinuity in 
the stream.

-Paul

Patch

diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index d348af8b9705..abac88f20104 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -191,16 +191,8 @@  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
-	struct iio_dma_buffer_queue *queue = block->queue;
-
-	/*
-	 * The buffer has already been freed by the application, just drop the
-	 * reference.
-	 */
-	if (block->state != IIO_BLOCK_STATE_DEAD) {
+	if (block->state != IIO_BLOCK_STATE_DEAD)
 		block->state = IIO_BLOCK_STATE_DONE;
-		list_add_tail(&block->head, &queue->outgoing);
-	}
 }
 
 /**
@@ -317,11 +309,8 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 	 * dead. This means we can reset the lists without having to fear
 	 * corrution.
 	 */
-	INIT_LIST_HEAD(&queue->outgoing);
 	spin_unlock_irq(&queue->list_lock);
 
-	INIT_LIST_HEAD(&queue->incoming);
-
 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 		if (queue->fileio.blocks[i]) {
 			block = queue->fileio.blocks[i];
@@ -346,7 +335,6 @@  int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 		}
 
 		block->state = IIO_BLOCK_STATE_QUEUED;
-		list_add_tail(&block->head, &queue->incoming);
 	}
 
 out_unlock:
@@ -401,13 +389,18 @@  int iio_dma_buffer_enable(struct iio_buffer *buffer,
 	struct iio_dev *indio_dev)
 {
 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
-	struct iio_dma_buffer_block *block, *_block;
+	struct iio_dma_buffer_block *block;
+	unsigned int i;
 
 	mutex_lock(&queue->lock);
 	queue->active = true;
-	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
-		list_del(&block->head);
-		iio_dma_buffer_submit_block(queue, block);
+	queue->fileio.next_dequeue = 0;
+
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		block = queue->fileio.blocks[i];
+
+		if (block->state == IIO_BLOCK_STATE_QUEUED)
+			iio_dma_buffer_submit_block(queue, block);
 	}
 	mutex_unlock(&queue->lock);
 
@@ -442,28 +435,33 @@  EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);
 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
 	struct iio_dma_buffer_block *block)
 {
-	if (block->state == IIO_BLOCK_STATE_DEAD) {
+	if (block->state == IIO_BLOCK_STATE_DEAD)
 		iio_buffer_block_put(block);
-	} else if (queue->active) {
+	else if (queue->active)
 		iio_dma_buffer_submit_block(queue, block);
-	} else {
+	else
 		block->state = IIO_BLOCK_STATE_QUEUED;
-		list_add_tail(&block->head, &queue->incoming);
-	}
 }
 
 static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
 	struct iio_dma_buffer_queue *queue)
 {
 	struct iio_dma_buffer_block *block;
+	unsigned int idx;
 
 	spin_lock_irq(&queue->list_lock);
-	block = list_first_entry_or_null(&queue->outgoing, struct
-		iio_dma_buffer_block, head);
-	if (block != NULL) {
-		list_del(&block->head);
+
+	idx = queue->fileio.next_dequeue;
+	block = queue->fileio.blocks[idx];
+
+	if (block->state == IIO_BLOCK_STATE_DONE) {
 		block->state = IIO_BLOCK_STATE_DEQUEUED;
+		idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
+		queue->fileio.next_dequeue = idx;
+	} else {
+		block = NULL;
 	}
+
 	spin_unlock_irq(&queue->list_lock);
 
 	return block;
@@ -539,6 +537,7 @@  size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
 	struct iio_dma_buffer_block *block;
 	size_t data_available = 0;
+	unsigned int i;
 
 	/*
 	 * For counting the available bytes we'll use the size of the block not
@@ -552,8 +551,15 @@  size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 		data_available += queue->fileio.active_block->size;
 
 	spin_lock_irq(&queue->list_lock);
-	list_for_each_entry(block, &queue->outgoing, head)
-		data_available += block->size;
+
+	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+		block = queue->fileio.blocks[i];
+
+		if (block != queue->fileio.active_block
+		    && block->state == IIO_BLOCK_STATE_DONE)
+			data_available += block->size;
+	}
+
 	spin_unlock_irq(&queue->list_lock);
 	mutex_unlock(&queue->lock);
 
@@ -616,9 +622,6 @@  int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 	queue->dev = dev;
 	queue->ops = ops;
 
-	INIT_LIST_HEAD(&queue->incoming);
-	INIT_LIST_HEAD(&queue->outgoing);
-
 	mutex_init(&queue->lock);
 	spin_lock_init(&queue->list_lock);
 
@@ -645,11 +648,8 @@  void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
 			continue;
 		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
 	}
-	INIT_LIST_HEAD(&queue->outgoing);
 	spin_unlock_irq(&queue->list_lock);
 
-	INIT_LIST_HEAD(&queue->incoming);
-
 	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
 		if (!queue->fileio.blocks[i])
 			continue;
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index ff15c61bf319..d4ed5ff39d44 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -78,12 +78,15 @@  struct iio_dma_buffer_block {
  * @active_block: Block being used in read()
  * @pos: Read offset in the active block
  * @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
  */
 struct iio_dma_buffer_queue_fileio {
 	struct iio_dma_buffer_block *blocks[2];
 	struct iio_dma_buffer_block *active_block;
 	size_t pos;
 	size_t block_size;
+
+	unsigned int next_dequeue;
 };
 
 /**
@@ -97,8 +100,6 @@  struct iio_dma_buffer_queue_fileio {
  *   atomic context as well as blocks on those lists. This is the outgoing queue
  *   list and typically also a list of active blocks in the part that handles
  *   the DMA controller
- * @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
  * @active: Whether the buffer is currently active
  * @fileio: FileIO state
  */
@@ -109,8 +110,6 @@  struct iio_dma_buffer_queue {
 
 	struct mutex lock;
 	spinlock_t list_lock;
-	struct list_head incoming;
-	struct list_head outgoing;
 
 	bool active;