
[1/2] block: Split blk_recalc_rq_segments()

Message ID 20230317195938.1745318-2-bvanassche@acm.org (mailing list archive)
State New, archived
Series Submit split bios in LBA order

Commit Message

Bart Van Assche March 17, 2023, 7:59 p.m. UTC
Prepare for adding a direct call to bio_nr_segments().

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-merge.c | 24 ++++++++++++++----------
 block/blk.h       |  1 +
 2 files changed, 15 insertions(+), 10 deletions(-)
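The commit message only states that this split prepares for a direct call, so the following is a minimal sketch of the kind of caller patch 2/2 presumably adds: computing the physical segment count of a bio chain without going through a struct request. The surrounding submit function and its use of the result are illustrative assumptions, not taken from the series; only bio_split_to_limits() and bio_nr_segments() come from this patch.

/*
 * Hypothetical caller, for illustration only: split a bio to the queue
 * limits, then ask the new helper for its physical segment count.
 */
static void example_submit_path(struct request_queue *q, struct bio *bio)
{
	unsigned int nr_segs;

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	nr_segs = bio_nr_segments(&q->limits, bio);
	/* ... size and allocate a request for nr_segs segments ... */
}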

Comments

Christoph Hellwig March 18, 2023, 6:38 a.m. UTC | #1
On Fri, Mar 17, 2023 at 12:59:37PM -0700, Bart Van Assche wrote:
> +unsigned int bio_nr_segments(const struct queue_limits *lim, struct bio *bio)

The name is wrong, as it operates not on a single bio, but
rather on a chain of bios.  That being said, it seems like your caller
in the next patch only cares about the regular read/write bio case,
which is just this:

> +	for_each_bio(bio)
> +		bio_for_each_bvec(bv, bio, iter)
> +			bvec_split_segs(lim, &bv, &nr_phys_segs, &bytes,
> +					UINT_MAX, UINT_MAX);

So maybe split that into a separate patch.

Also, please pass the queue_limits after the bio, like the other
functions in this file.
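
For concreteness, here is a hedged sketch of the rework suggested above: keep only the regular read/write loop in the new helper and pass the queue_limits after the bio, as the other functions in blk-merge.c do. The name bio_chain_nr_segments() is made up for illustration, and the sketch assumes it sits in blk-merge.c next to the static bvec_split_segs() helper it calls.

/* Illustrative only; the name and exact placement are not from the series. */
static unsigned int bio_chain_nr_segments(struct bio *bio,
					  const struct queue_limits *lim)
{
	unsigned int nr_phys_segs = 0, bytes = 0;
	struct bvec_iter iter;
	struct bio_vec bv;

	for_each_bio(bio)
		bio_for_each_bvec(bv, bio, iter)
			bvec_split_segs(lim, &bv, &nr_phys_segs, &bytes,
					UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}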

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index b80c3e650588..2e07f6bd96be 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -408,22 +408,20 @@  struct bio *bio_split_to_limits(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_split_to_limits);
 
-unsigned int blk_recalc_rq_segments(struct request *rq)
+unsigned int bio_nr_segments(const struct queue_limits *lim, struct bio *bio)
 {
 	unsigned int nr_phys_segs = 0;
 	unsigned int bytes = 0;
-	struct req_iterator iter;
+	struct bvec_iter iter;
 	struct bio_vec bv;
 
-	if (!rq->bio)
+	if (!bio)
 		return 0;
 
-	switch (bio_op(rq->bio)) {
+	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		if (queue_max_discard_segments(rq->q) > 1) {
-			struct bio *bio = rq->bio;
-
+		if (lim->max_discard_segments > 1) {
 			for_each_bio(bio)
 				nr_phys_segs++;
 			return nr_phys_segs;
@@ -435,12 +433,18 @@  unsigned int blk_recalc_rq_segments(struct request *rq)
 		break;
 	}
 
-	rq_for_each_bvec(bv, rq, iter)
-		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
-				UINT_MAX, UINT_MAX);
+	for_each_bio(bio)
+		bio_for_each_bvec(bv, bio, iter)
+			bvec_split_segs(lim, &bv, &nr_phys_segs, &bytes,
+					UINT_MAX, UINT_MAX);
 	return nr_phys_segs;
 }
 
+unsigned int blk_recalc_rq_segments(struct request *rq)
+{
+	return bio_nr_segments(&rq->q->limits, rq->bio);
+}
+
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
diff --git a/block/blk.h b/block/blk.h
index d65d96994a94..9686ee808bab 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -330,6 +330,7 @@  int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
+unsigned int bio_nr_segments(const struct queue_limits *lim, struct bio *bio);
 unsigned int blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
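
As a reading aid for the blk-merge.c hunk: the old rq_for_each_bvec() loop and the new for_each_bio() plus bio_for_each_bvec() nesting are meant to walk the same bvecs, since rq_for_each_bvec() is itself defined as a double loop over the request's bio chain. Roughly (from include/linux/blk-mq.h in kernels of this era; the exact macro text may differ slightly):

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

The other substitution, reading lim->max_discard_segments instead of calling queue_max_discard_segments(rq->q), also resolves to the same value, since that helper just returns q->limits.max_discard_segments.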