diff mbox series

[V2,5/5] loop: add hint for handling aio via IOCB_NOWAIT

Message ID 20250314021148.3081954-6-ming.lei@redhat.com (mailing list archive)
State New
Headers show
Series loop: improve loop aio perf by IOCB_NOWAIT | expand

Commit Message

Ming Lei March 14, 2025, 2:11 a.m. UTC
Add a hint for using IOCB_NOWAIT to handle loop aio commands, to avoid
causing a write (especially randwrite) perf regression on sparse files.

Try IOCB_NOWAIT in the following situations:

- backing file is block device

- READ aio command

- there is no queued non-NOWAIT aio WRITE, since retrying a NOWAIT request
won't cause contention on WRITE, and a non-NOWAIT WRITE often implies an
exclusive lock.

With this simple policy, the perf regression of randwrite/write on a sparse
backing file is fixed. Meanwhile, this approach addresses the perf problem[1]
in case of stable FS block mapping via NOWAIT.

Link: https://lore.kernel.org/dm-devel/7d6ae2c9-df8e-50d0-7ad6-b787cb3cfab4@redhat.com/
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/block/loop.c | 61 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

Comments

hch March 20, 2025, 7:22 a.m. UTC | #1
On Fri, Mar 14, 2025 at 10:11:45AM +0800, Ming Lei wrote:
> Add hint for using IOCB_NOWAIT to handle loop aio command for avoiding
> to cause write(especially randwrite) perf regression on sparse file.
> 
> Try IOCB_NOWAIT in the following situations:
> 
> - backing file is block device

Why limit yourself to block devices?

> - READ aio command
> - there isn't queued aio non-NOWAIT WRITE, since retry of NOWAIT won't
> cause contention on WRITE and non-NOWAIT WRITE often implies exclusive
> lock.

This reads really odd because to me the list implies that you only
support reads, but the code also supports writes.  Maybe try to
explain this more clearly.

> With this simple policy, perf regression of randwrte/write on sparse
> backing file is fixed. Meantime this way addresses perf problem[1] in
> case of stable FS block mapping via NOWAIT.

This needs to go in with the patch implementing the logic.

> @@ -70,6 +70,7 @@ struct loop_device {
>  	struct rb_root          worker_tree;
>  	struct timer_list       timer;
>  	bool			sysfs_inited;
> +	unsigned 		queued_wait_write;

lo_nr_blocking_writes?

What serializes access to this variable?

> +static inline bool lo_aio_need_try_nowait(struct loop_device *lo,
> +		struct loop_cmd *cmd)

Drop the need_ in the name, it not only is superfluous, but also
makes it really hard to read the function name.

Also the inline looks spurious.

> +LOOP_ATTR_RO(nr_wait_write);

nr_blocking_writes?

> +static inline void loop_inc_wait_write(struct loop_device *lo, struct loop_cmd *cmd)

Overly long line.

> +	if (cmd->use_aio){

missing whitespace.

> +		struct request *rq = blk_mq_rq_from_pdu(cmd);
> +
> +		if (req_op(rq) == REQ_OP_WRITE)
> +			lo->queued_wait_write += 1;


	if (cmd->use_aio && req_op(blk_mq_rq_from_pdu(cmd)) == REQ_OP_WRITE)
			lo->queued_wait_write++;

> +	}
> +}
> +
> +static inline void loop_dec_wait_write(struct loop_device *lo, struct loop_cmd *cmd)
> +{
> +	lockdep_assert_held(&lo->lo_mutex);
> +
> +	if (cmd->use_aio){
> +		struct request *rq = blk_mq_rq_from_pdu(cmd);
> +
> +		if (req_op(rq) == REQ_OP_WRITE)
> +			lo->queued_wait_write -= 1;
> +	}
> +}

All the things above apply here as well.
Ming Lei March 20, 2025, 7:38 a.m. UTC | #2
On Thu, Mar 20, 2025 at 08:22:47AM +0100, Christoph Hellwig wrote:
> On Fri, Mar 14, 2025 at 10:11:45AM +0800, Ming Lei wrote:
> > Add hint for using IOCB_NOWAIT to handle loop aio command for avoiding
> > to cause write(especially randwrite) perf regression on sparse file.
> > 
> > Try IOCB_NOWAIT in the following situations:
> > 
> > - backing file is block device
> 
> Why limit yourself to block devices?

It doesn't limit to block device, just submit NOWAIT unconditionally.

I should have added 'OR' among the three lines.

> 
> > - READ aio command
> > - there isn't queued aio non-NOWAIT WRITE, since retry of NOWAIT won't
> > cause contention on WRITE and non-NOWAIT WRITE often implies exclusive
> > lock.
> 
> This reads really odd because to me the list implies that you only
> support reads, but the code also supports writes.  Maybe try to
> explain this more clearly.

Will improve the comment log.

> 
> > With this simple policy, perf regression of randwrte/write on sparse
> > backing file is fixed. Meantime this way addresses perf problem[1] in
> > case of stable FS block mapping via NOWAIT.
> 
> This needs to go in with the patch implementing the logic.

OK.

> 
> > @@ -70,6 +70,7 @@ struct loop_device {
> >  	struct rb_root          worker_tree;
> >  	struct timer_list       timer;
> >  	bool			sysfs_inited;
> > +	unsigned 		queued_wait_write;
> 
> lo_nr_blocking_writes?
> 
> What serializes access to this variable?

The write is serialized by the loop spin lock, and the read is done
via READ_ONCE(), since it is just a hint.

> 
> > +static inline bool lo_aio_need_try_nowait(struct loop_device *lo,
> > +		struct loop_cmd *cmd)
> 
> Drop the need_ in the name, it not only is superfluous, but also
> makes it really hard to read the function name.

OK.


Thanks, 
Ming
diff mbox series

Patch

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 542b1fe938a7..5bf14549cf8e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -70,6 +70,7 @@  struct loop_device {
 	struct rb_root          worker_tree;
 	struct timer_list       timer;
 	bool			sysfs_inited;
+	unsigned 		queued_wait_write;
 
 	struct request_queue	*lo_queue;
 	struct blk_mq_tag_set	tag_set;
@@ -522,6 +523,30 @@  static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd)
 	return 0;
 }
 
+static inline bool lo_aio_need_try_nowait(struct loop_device *lo,
+		struct loop_cmd *cmd)
+{
+	struct file *file = lo->lo_backing_file;
+	struct inode *inode = file->f_mapping->host;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+	/* NOWAIT works fine for backing block device */
+	if (S_ISBLK(inode->i_mode))
+		return true;
+
+	/* NOWAIT is supposed to be fine for READ */
+	if (req_op(rq) == REQ_OP_READ)
+		return true;
+
+	/*
+	 * If there is any queued non-NOWAIT async WRITE , don't try new
+	 * NOWAIT WRITE for avoiding contention
+	 *
+	 * Here we focus on handling stable FS block mapping via NOWAIT
+	 */
+	return READ_ONCE(lo->queued_wait_write) == 0;
+}
+
 static blk_status_t lo_rw_aio_nowait(struct loop_device *lo,
 		struct loop_cmd *cmd)
 {
@@ -531,6 +556,9 @@  static blk_status_t lo_rw_aio_nowait(struct loop_device *lo,
 	if (unlikely(ret < 0))
 		return BLK_STS_IOERR;
 
+	if (!lo_aio_need_try_nowait(lo, cmd))
+		return BLK_STS_AGAIN;
+
 	cmd->iocb.ki_flags |= IOCB_NOWAIT;
 	ret = lo_submit_rw_aio(lo, cmd, nr_bvec);
 	if (ret == -EAGAIN)
@@ -821,12 +849,18 @@  static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
 	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
 }
 
+static ssize_t loop_attr_nr_wait_write_show(struct loop_device *lo, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", lo->queued_wait_write);
+}
+
 LOOP_ATTR_RO(backing_file);
 LOOP_ATTR_RO(offset);
 LOOP_ATTR_RO(sizelimit);
 LOOP_ATTR_RO(autoclear);
 LOOP_ATTR_RO(partscan);
 LOOP_ATTR_RO(dio);
+LOOP_ATTR_RO(nr_wait_write);
 
 static struct attribute *loop_attrs[] = {
 	&loop_attr_backing_file.attr,
@@ -835,6 +869,7 @@  static struct attribute *loop_attrs[] = {
 	&loop_attr_autoclear.attr,
 	&loop_attr_partscan.attr,
 	&loop_attr_dio.attr,
+	&loop_attr_nr_wait_write.attr,
 	NULL,
 };
 
@@ -910,6 +945,30 @@  static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 }
 #endif
 
+static inline void loop_inc_wait_write(struct loop_device *lo, struct loop_cmd *cmd)
+{
+	lockdep_assert_held(&lo->lo_mutex);
+
+	if (cmd->use_aio){
+		struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+		if (req_op(rq) == REQ_OP_WRITE)
+			lo->queued_wait_write += 1;
+	}
+}
+
+static inline void loop_dec_wait_write(struct loop_device *lo, struct loop_cmd *cmd)
+{
+	lockdep_assert_held(&lo->lo_mutex);
+
+	if (cmd->use_aio){
+		struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+		if (req_op(rq) == REQ_OP_WRITE)
+			lo->queued_wait_write -= 1;
+	}
+}
+
 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 {
 	struct request __maybe_unused *rq = blk_mq_rq_from_pdu(cmd);
@@ -992,6 +1051,7 @@  static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 		work = &lo->rootcg_work;
 		cmd_list = &lo->rootcg_cmd_list;
 	}
+	loop_inc_wait_write(lo, cmd);
 	list_add_tail(&cmd->list_entry, cmd_list);
 	queue_work(lo->workqueue, work);
 	spin_unlock_irq(&lo->lo_work_lock);
@@ -2051,6 +2111,7 @@  static void loop_process_work(struct loop_worker *worker,
 		cond_resched();
 
 		spin_lock_irq(&lo->lo_work_lock);
+		loop_dec_wait_write(lo, cmd);
 	}
 
 	/*