
[1/2] block: Call .limit_depth() after .hctx has been set

Message ID 20240403212354.523925-2-bvanassche@acm.org (mailing list archive)
State New
Series Fix the mq-deadline async_depth implementation

Commit Message

Bart Van Assche April 3, 2024, 9:23 p.m. UTC
Call .limit_depth() after data->hctx has been set so that .limit_depth()
implementations can use data->hctx.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Zhiguo Niu <zhiguo.niu@unisoc.com>
Fixes: 07757588e507 ("block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests")
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
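
For illustration, below is a minimal, hypothetical .limit_depth() implementation that depends on data->hctx being assigned, which is what this patch guarantees. The function name and throttling policy are invented for this sketch (they are not taken from this series or from mq-deadline), and it assumes the private block/blk-mq.h header for struct blk_mq_alloc_data:

/*
 * Hypothetical sketch, not code from this series: a .limit_depth()
 * callback that reads the per-hctx scheduler tag depth via data->hctx.
 */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	/* Do not throttle synchronous requests. */
	if (op_is_sync(opf))
		return;

	/*
	 * Example policy: cap asynchronous allocations at three quarters of
	 * the per-hctx scheduler tag depth. data->hctx is only valid here
	 * once this patch is applied.
	 */
	data->shallow_depth =
		max(1U, data->hctx->sched_tags->bitmap_tags.sb.depth * 3 / 4);
}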

Comments

Christoph Hellwig April 5, 2024, 8:46 a.m. UTC | #1
Calling limit_depth with the hctx set makes sense, but the way it's done
looks odd.  Why not something like this?

diff --git a/block/blk-mq.c b/block/blk-mq.c
index b8dbfed8b28be1..88886fd93b1a9c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
+retry:
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+
 	if (q->elevator) {
 		/*
 		 * All requests use scheduler tags when an I/O scheduler is
@@ -469,13 +473,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 			if (ops->limit_depth)
 				ops->limit_depth(data->cmd_flags, data);
 		}
-	}
-
-retry:
-	data->ctx = blk_mq_get_ctx(q);
-	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-	if (!(data->rq_flags & RQF_SCHED_TAGS))
+	} else {
 		blk_mq_tag_busy(data->hctx);
+	}
 
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
Bart Van Assche April 5, 2024, 8:05 p.m. UTC | #2
On 4/5/24 01:46, Christoph Hellwig wrote:
> Calling limit_depth with the hctx set makes sense, but the way it's done
> looks odd.  Why not something like this?
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index b8dbfed8b28be1..88886fd93b1a9c 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>   	if (data->cmd_flags & REQ_NOWAIT)
>   		data->flags |= BLK_MQ_REQ_NOWAIT;
>   
> +retry:
> +	data->ctx = blk_mq_get_ctx(q);
> +	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
> +
>   	if (q->elevator) {
>   		/*
>   		 * All requests use scheduler tags when an I/O scheduler is
> @@ -469,13 +473,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>   			if (ops->limit_depth)
>   				ops->limit_depth(data->cmd_flags, data);
>   		}
> -	}
> -
> -retry:
> -	data->ctx = blk_mq_get_ctx(q);
> -	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
> -	if (!(data->rq_flags & RQF_SCHED_TAGS))
> +	} else {
>   		blk_mq_tag_busy(data->hctx);
> +	}
>   
>   	if (data->flags & BLK_MQ_REQ_RESERVED)
>   		data->rq_flags |= RQF_RESV;

Hi Christoph,

The above patch looks good to me and I'm fine with using it to replace
patch 1/2. Do you want me to add your Signed-off-by to it?

Thanks,

Bart.

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 34060d885c5a..bcaa722896a0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -434,6 +434,7 @@ static struct request *__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
+	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *) = NULL;
 	struct request_queue *q = data->q;
 	u64 alloc_time_ns = 0;
 	struct request *rq;
@@ -459,13 +460,11 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		 */
 		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
 		    !blk_op_is_passthrough(data->cmd_flags)) {
-			struct elevator_mq_ops *ops = &q->elevator->type->ops;
+			limit_depth = q->elevator->type->ops.limit_depth;
 
 			WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED);
 
 			data->rq_flags |= RQF_USE_SCHED;
-			if (ops->limit_depth)
-				ops->limit_depth(data->cmd_flags, data);
 		}
 	}
 
@@ -478,6 +477,9 @@  static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	if (data->flags & BLK_MQ_REQ_RESERVED)
 		data->rq_flags |= RQF_RESV;
 
+	if (limit_depth)
+		limit_depth(data->cmd_flags, data);
+
 	/*
 	 * Try batched alloc if we want more than 1 tag.
 	 */
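
For completeness, a hypothetical registration sketch showing where the .limit_depth hook is wired up. example_sched, example_limit_depth, and the module boilerplate are invented for illustration; a real scheduler must also provide init_sched, insert_requests, dispatch_request, and the other mandatory ops, and the sketch assumes a file under block/ so it can see the private elevator.h and blk-mq.h headers:

#include <linux/module.h>

#include "elevator.h"	/* private block/ header: struct elevator_type, elv_register() */
#include "blk-mq.h"	/* private block/ header: struct blk_mq_alloc_data */

/* The hypothetical callback sketched under the commit message above. */
static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data);

static struct elevator_type example_sched = {
	.ops = {
		/* Runs with data->hctx already mapped once this patch is applied. */
		.limit_depth	= example_limit_depth,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_sched_init(void)
{
	return elv_register(&example_sched);
}
module_init(example_sched_init);

MODULE_LICENSE("GPL");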