
[v2,3/8] block: Support submitting passthrough requests with small segments

Message ID 20221123205740.463185-4-bvanassche@acm.org (mailing list archive)
State New, archived
Series Add support for segments smaller than one page

Commit Message

Bart Van Assche Nov. 23, 2022, 8:57 p.m. UTC
If the maximum segment size is smaller than the page size, there may be
multiple DMA segments per bvec even if a bvec only contains a single page.
Hence this patch, which makes blk_rq_append_bio() count the segments per
bvec with a new blk_segments() helper instead of assuming one segment per
bvec.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-map.c | 16 +++++++++++++++-
 block/blk.h     | 11 +++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)
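
Not part of the patch: a small user-space sketch of the arithmetic that
blk_segments() performs, with nr_segments(), is_pow2(), ilog2_u32() and
round_up_u32() as local stand-ins for the kernel's blk_segments(),
is_power_of_2(), ilog2() and round_up(). It illustrates that a single-page
bvec needs several segments once max_segment_size drops below the page
size, and that the power-of-2 shift path matches plain DIV_ROUND_UP.

/* Illustration only, not part of the patch: user-space recomputation of the
 * blk_segments() arithmetic with local stand-ins for the kernel helpers.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int x)		/* stand-in for is_power_of_2() */
{
	return x && !(x & (x - 1));
}

static unsigned int ilog2_u32(unsigned int x)	/* stand-in for ilog2() */
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Stand-in for round_up(); @a must be a power of two. */
static unsigned int round_up_u32(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Same logic as the patch's blk_segments(), with max_segment_size passed directly. */
static unsigned int nr_segments(unsigned int mss, unsigned int bytes)
{
	if (bytes <= mss)
		return 1;
	if (is_pow2(mss))
		return round_up_u32(bytes, mss) >> ilog2_u32(mss);
	return (bytes + mss - 1) / mss;
}

int main(void)
{
	/* A bvec covering one 4 KiB page with a 1 KiB segment limit needs four
	 * segments, where the old "one segment per bvec" count reported one.
	 */
	assert(nr_segments(1024, 4096) == 4);
	assert(nr_segments(1024, 512) == 1);

	/* The power-of-2 shift path agrees with plain DIV_ROUND_UP. */
	for (unsigned int bytes = 1; bytes <= 64 * 1024; bytes++)
		assert(nr_segments(1024, bytes) == (bytes + 1023) / 1024);

	printf("segment counts check out\n");
	return 0;
}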

Patch

diff --git a/block/blk-map.c b/block/blk-map.c
index 19940c978c73..d2d6ee098514 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -524,6 +524,20 @@  static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 	return ERR_PTR(-ENOMEM);
 }
 
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+/* Number of DMA segments required to transfer @bytes data. */
+unsigned int blk_segments(const struct queue_limits *limits, unsigned int bytes)
+{
+	const unsigned int mss = limits->max_segment_size;
+
+	if (bytes <= mss)
+		return 1;
+	if (is_power_of_2(mss))
+		return round_up(bytes, mss) >> ilog2(mss);
+	return (bytes + mss - 1) / mss;
+}
+#endif
+
 /*
  * Append a bio to a passthrough request.  Only works if the bio can be merged
  * into the request based on the driver constraints.
@@ -535,7 +549,7 @@  int blk_rq_append_bio(struct request *rq, struct bio *bio)
 	unsigned int nr_segs = 0;
 
 	bio_for_each_bvec(bv, bio, iter)
-		nr_segs++;
+		nr_segs += blk_segments(&rq->q->limits, bv.bv_len);
 
 	if (!rq->bio) {
 		blk_rq_bio_prep(rq, bio, nr_segs);
diff --git a/block/blk.h b/block/blk.h
index 5929559acd71..fb486eff3eef 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -80,6 +80,17 @@  struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 		gfp_t gfp_mask);
 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
 
+#ifdef CONFIG_BLK_SUB_PAGE_SEGMENTS
+unsigned int blk_segments(const struct queue_limits *limits,
+			  unsigned int bytes);
+#else
+static inline unsigned int blk_segments(const struct queue_limits *limits,
+					unsigned int bytes)
+{
+	return 1;
+}
+#endif
+
 static inline bool biovec_phys_mergeable(struct request_queue *q,
 		struct bio_vec *vec1, struct bio_vec *vec2)
 {