
[1/3] blk-mq: simplify the plug handling in blk_mq_submit_bio

Message ID 20211123160443.1315598-2-hch@lst.de

Commit Message

Christoph Hellwig Nov. 23, 2021, 4:04 p.m. UTC
blk_mq_submit_bio has two different plug cases: one that uses full
plugging and one that uses limited plugging.

The limited plugging case is only used for a corner case that does
not matter in real life:

 - no ->commit_rqs (so not NVMe)
 - no shared tags (so not SCSI)
 - not rotational (so no old disk or floppy driver)
 - must have multiple queues (so no eMMC)

Remove the limited plugging case and all the related junk to simplify
blk_mq_submit_bio and the functions called from it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-merge.c |  9 +------
 block/blk-mq.c    | 68 +++++++++--------------------------------------
 block/blk.h       |  2 +-
 3 files changed, 15 insertions(+), 64 deletions(-)
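
To make the pre-patch decision concrete, here is a minimal standalone
sketch in plain C. This is not kernel code: the boolean parameters are
stand-ins for q->nr_hw_queues == 1, blk_mq_is_shared_tags(),
q->mq_ops->commit_rqs, !blk_queue_nonrot(q), RQF_ELV and
!blk_queue_nomerges(q) as tested in the diff below.

#include <stdbool.h>

enum plug_mode { PLUG_FULL, PLUG_LIMITED, NO_PLUG };

static enum plug_mode old_plug_mode(bool have_plug, bool single_hw_queue,
				    bool shared_tags, bool has_commit_rqs,
				    bool rotational, bool has_elevator,
				    bool merges_enabled)
{
	/* Full plugging: any one of the four conditions suffices, which
	 * is why NVMe, SCSI, eMMC and rotational disks never reach the
	 * limited path. */
	if (have_plug && (single_hw_queue || shared_tags ||
			  has_commit_rqs || rotational))
		return PLUG_FULL;
	/* With an elevator attached, the request went to the I/O
	 * scheduler instead of any plug. */
	if (has_elevator)
		return NO_PLUG;
	/* Only the corner case enumerated above lands here: at most one
	 * request was kept on the plug list. */
	if (have_plug && merges_enabled)
		return PLUG_LIMITED;
	return NO_PLUG;
}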

Comments

Jens Axboe Nov. 23, 2021, 7:23 p.m. UTC | #1
On Tue, 23 Nov 2021 17:04:41 +0100, Christoph Hellwig wrote:
> blk_mq_submit_bio has two different plug cases: one that uses full
> plugging and one that uses limited plugging.
> 
> The limited plugging case is only used for a corner case that does
> not matter in real life:
> 
>  - no ->commit_rqs (so not NVMe)
>  - no shared tags (so not SCSI)
>  - not rotational (so no old disk or floppy driver)
>  - must have multiple queues (so no eMMC)
> 
> [...]

Applied, thanks!

[1/3] blk-mq: simplify the plug handling in blk_mq_submit_bio
      commit: bb5b684ffe6deb797ed36b2b323f747a5f7d1a2c
[2/3] blk-mq: move more plug handling from blk_mq_submit_bio into blk_add_rq_to_plug
      commit: da7bdd66a69b14d13ff8f9064efc524081e64335
[3/3] blk-mq: cleanup request allocation
      (no commit info)

Best regards,

Jens Axboe

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 893c1a60b701f..ba761c3f482ba 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -1067,7 +1067,6 @@  static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @nr_segs: number of segments in @bio
- * @same_queue_rq: output value, will be true if there's an existing request
  * from the passed in @q already in the plug list
  *
  * Determine whether @bio being queued on @q can be merged with the previous
@@ -1084,7 +1083,7 @@  static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq)
+		unsigned int nr_segs)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -1096,12 +1095,6 @@  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	/* check the previously added entry for a quick merge attempt */
 	rq = rq_list_peek(&plug->mq_list);
 	if (rq->q == q) {
-		/*
-		 * Only blk-mq multiple hardware queues case checks the rq in
-		 * the same queue, there should be only one such rq in a queue
-		 */
-		*same_queue_rq = true;
-
 		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
 				BIO_MERGE_OK)
 			return true;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1feb9ab65f28a..f05c458d983b4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2689,11 +2689,10 @@  static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 }
 
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
-				     struct bio *bio, unsigned int nr_segs,
-				     bool *same_queue_rq)
+				     struct bio *bio, unsigned int nr_segs)
 {
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
-		if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+		if (blk_attempt_plug_merge(q, bio, nr_segs))
 			return true;
 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
 			return true;
@@ -2704,8 +2703,7 @@  static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
 					       struct bio *bio,
-					       unsigned int nsegs,
-					       bool *same_queue_rq)
+					       unsigned int nsegs)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
@@ -2714,7 +2712,7 @@  static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
 		return NULL;
 
 	rq_qos_throttle(q, bio);
@@ -2751,8 +2749,7 @@  static inline bool blk_mq_can_use_cached_rq(struct request *rq,
 static inline struct request *blk_mq_get_request(struct request_queue *q,
 						 struct blk_plug *plug,
 						 struct bio *bio,
-						 unsigned int nsegs,
-						 bool *same_queue_rq)
+						 unsigned int nsegs)
 {
 	struct request *rq;
 	bool checked = false;
@@ -2763,8 +2760,7 @@  static inline struct request *blk_mq_get_request(struct request_queue *q,
 		if (rq && rq->q == q) {
 			if (unlikely(!submit_bio_checks(bio)))
 				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs,
-						same_queue_rq))
+			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
 				return NULL;
 			checked = true;
 			if (!blk_mq_can_use_cached_rq(rq, bio))
@@ -2782,7 +2778,7 @@  static inline struct request *blk_mq_get_request(struct request_queue *q,
 		return NULL;
 	if (!checked && !submit_bio_checks(bio))
 		return NULL;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
 	if (!rq)
 		blk_queue_exit(q);
 	return rq;
@@ -2807,7 +2803,6 @@  void blk_mq_submit_bio(struct bio *bio)
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
 	struct blk_plug *plug;
-	bool same_queue_rq = false;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2822,7 +2817,7 @@  void blk_mq_submit_bio(struct bio *bio)
 		return;
 
 	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
+	rq = blk_mq_get_request(q, plug, bio, nr_segs);
 	if (unlikely(!rq))
 		return;
 
@@ -2843,16 +2838,7 @@  void blk_mq_submit_bio(struct bio *bio)
 	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
 		return;
 
-	if (plug && (q->nr_hw_queues == 1 ||
-	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
-	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
-		/*
-		 * Use plugging if we have a ->commit_rqs() hook as well, as
-		 * we know the driver uses bd->last in a smart fashion.
-		 *
-		 * Use normal plugging if this disk is slow HDD, as sequential
-		 * IO may benefit a lot from plug merging.
-		 */
+	if (plug) {
 		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
 
@@ -2870,40 +2856,12 @@  void blk_mq_submit_bio(struct bio *bio)
 		}
 
 		blk_add_rq_to_plug(plug, rq);
-	} else if (rq->rq_flags & RQF_ELV) {
-		/* Insert the request at the IO scheduler queue */
+	} else if ((rq->rq_flags & RQF_ELV) ||
+		   (rq->mq_hctx->dispatch_busy &&
+		    (q->nr_hw_queues == 1 || !is_sync))) {
 		blk_mq_sched_insert_request(rq, false, true, true);
-	} else if (plug && !blk_queue_nomerges(q)) {
-		struct request *next_rq = NULL;
-
-		/*
-		 * We do limited plugging. If the bio can be merged, do that.
-		 * Otherwise the existing request in the plug list will be
-		 * issued. So the plug list will have one request at most
-		 * The plug list might get flushed before this. If that happens,
-		 * the plug list is empty, and same_queue_rq is invalid.
-		 */
-		if (same_queue_rq) {
-			next_rq = rq_list_pop(&plug->mq_list);
-			plug->rq_count--;
-		}
-		blk_add_rq_to_plug(plug, rq);
-		trace_block_plug(q);
-
-		if (next_rq) {
-			trace_block_unplug(q, 1, true);
-			blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
-		}
-	} else if ((q->nr_hw_queues > 1 && is_sync) ||
-		   !rq->mq_hctx->dispatch_busy) {
-		/*
-		 * There is no scheduler and we can try to send directly
-		 * to the hardware.
-		 */
-		blk_mq_try_issue_directly(rq->mq_hctx, rq);
 	} else {
-		/* Default case. */
-		blk_mq_sched_insert_request(rq, false, true, true);
+		blk_mq_try_issue_directly(rq->mq_hctx, rq);
 	}
 }
 
diff --git a/block/blk.h b/block/blk.h
index 296e3010f8d65..cfac3bdeb77d9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -253,7 +253,7 @@  void blk_add_timer(struct request *req);
 const char *blk_status_to_str(blk_status_t status);
 
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq);
+		unsigned int nr_segs);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 			struct bio *bio, unsigned int nr_segs);