[v16,12/26] block: Rework request allocation in blk_mq_submit_bio()

Message ID 20241119002815.600608-13-bvanassche@acm.org
State New
Series Improve write performance for zoned UFS devices

Commit Message

Bart Van Assche Nov. 19, 2024, 12:28 a.m. UTC
Prepare for allocating a request from a specific hctx by making
blk_mq_submit_bio() allocate a request later.

The performance impact of this patch is small: if a request is cached,
one percpu_ref_get(&q->q_usage_counter) call and one
percpu_ref_put(&q->q_usage_counter) call are added to the hot path.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)
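
To make the cost claim above concrete, below is a minimal, self-contained
userspace model of the cached-request fast path before and after this change.
The names queue_enter(), queue_exit() and have_cached_request are hypothetical
stand-ins, not kernel code; in the kernel the counter is q->q_usage_counter, a
percpu_ref taken and released via bio_queue_enter()/blk_queue_exit(), so the
extra get/put pair is typically a per-CPU increment and decrement.

/*
 * Minimal userspace model of the q_usage_counter accounting on the
 * cached-request path. All names here are hypothetical; the real code
 * operates on q->q_usage_counter through bio_queue_enter() and
 * blk_queue_exit().
 */
#include <stdio.h>

static int usage_counter;               /* stands in for the percpu_ref */
static int have_cached_request = 1;     /* model: plug holds a cached request */

static void queue_enter(void) { usage_counter++; }  /* models bio_queue_enter() */
static void queue_exit(void)  { usage_counter--; }  /* models blk_queue_exit() */

/*
 * Before the patch: the cached request is peeked first and its embedded
 * queue reference is reused, so no extra get/put happens on this path.
 */
static int submit_old(void)
{
        int before = usage_counter;

        if (!have_cached_request)
                queue_enter();
        /* ... alignment/size checks, then reuse the cached request ... */
        return usage_counter - before;
}

/*
 * After the patch: the reference is always taken before the checks (so the
 * queue cannot be frozen underneath them) and the extra reference is dropped
 * once the cached request, which carries its own reference, is picked.
 */
static int submit_new(void)
{
        int before = usage_counter;

        queue_enter();
        /* ... alignment/size checks done with the queue pinned ... */
        if (have_cached_request)
                queue_exit();   /* drop one of the two references held here */
        return usage_counter - before;
}

int main(void)
{
        printf("old cached path: net %+d on the counter, no extra get/put\n",
               submit_old());
        printf("new cached path: net %+d on the counter, one extra get/put pair\n",
               submit_new());
        return 0;
}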

Comments

Damien Le Moal Nov. 19, 2024, 7:44 a.m. UTC | #1
On 11/19/24 09:28, Bart Van Assche wrote:
> Prepare for allocating a request from a specific hctx by making
> blk_mq_submit_bio() allocate a request later.
> 
> The performance impact of this patch is small: if a request is cached,
> one percpu_ref_get(&q->q_usage_counter) call and one
> percpu_ref_put(&q->q_usage_counter) call are added to the hot path.
> 
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>

The patch looks OK, but I cannot weigh in on whether this is really a "small"
impact on performance. Jens may want to run his perf setup with this to verify.

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 56a6b5bef39f..5ff124fbc17c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3071,11 +3071,6 @@  void blk_mq_submit_bio(struct bio *bio)
 	struct request *rq;
 	blk_status_t ret;
 
-	/*
-	 * If the plug has a cached request for this queue, try to use it.
-	 */
-	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
-
 	/*
 	 * A BIO that was released from a zone write plug has already been
 	 * through the preparation in this function, already holds a reference
@@ -3084,26 +3079,19 @@  void blk_mq_submit_bio(struct bio *bio)
 	 */
 	if (bio_zone_write_plugging(bio)) {
 		nr_segs = bio->__bi_nr_segments;
-		if (rq)
-			blk_queue_exit(q);
 		goto new_request;
 	}
 
 	bio = blk_queue_bounce(bio, q);
 
 	/*
-	 * The cached request already holds a q_usage_counter reference and we
-	 * don't have to acquire a new one if we use it.
+	 * Increment the queue usage counter before performing any checks
+	 * based on queue properties that may change if the queue is frozen,
+	 * e.g. the logical block size.
 	 */
-	if (!rq) {
-		if (unlikely(bio_queue_enter(bio)))
-			return;
-	}
+	if (unlikely(bio_queue_enter(bio)))
+		return;
 
-	/*
-	 * Device reconfiguration may change logical block size, so alignment
-	 * check has to be done with queue usage counter held
-	 */
 	if (unlikely(bio_unaligned(bio, q))) {
 		bio_io_error(bio);
 		goto queue_exit;
@@ -3123,8 +3111,15 @@  void blk_mq_submit_bio(struct bio *bio)
 		goto queue_exit;
 
 new_request:
+	rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
 	if (rq) {
 		blk_mq_use_cached_rq(rq, plug, bio);
+		/*
+		 * Here we hold two references: one because of the
+		 * bio_queue_enter() call and a second one as the result of
+		 * request allocation. Drop one.
+		 */
+		blk_queue_exit(q);
 	} else {
 		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
 		if (unlikely(!rq))
@@ -3167,12 +3162,7 @@  void blk_mq_submit_bio(struct bio *bio)
 	return;
 
 queue_exit:
-	/*
-	 * Don't drop the queue reference if we were trying to use a cached
-	 * request and thus didn't acquire one.
-	 */
-	if (!rq)
-		blk_queue_exit(q);
+	blk_queue_exit(q);
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING