block: fix segment calculation for passthrough IO

Message ID 20190303131748.24646-1-ming.lei@redhat.com (mailing list archive)
State New, archived
Series: block: fix segment calculation for passthrough IO

Commit Message

Ming Lei March 3, 2019, 1:17 p.m. UTC
blk_recount_segments() can be called from bio_add_pc_page() to calculate
how many segments the bio will have after a page is added to it. If the
resulting segment count exceeds the queue limit, the newly added page is
removed again.
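
For context, the add-then-check flow in bio_add_pc_page() looks roughly like
the sketch below. This is a simplified illustration of the pattern described
above, not the verbatim kernel function: the helper name add_pc_page_sketch()
is invented, and the retry and cleanup details of the real code are elided.

/*
 * Simplified sketch of the try-and-fix pattern in bio_add_pc_page();
 * not the verbatim kernel code.
 */
static int add_pc_page_sketch(struct request_queue *q, struct bio *bio,
			      struct page *page, unsigned int len,
			      unsigned int offset)
{
	struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];

	/* Tentatively append the page as a new bvec. */
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;

	/* Recount segments with the new page included... */
	blk_recount_segments(q, bio);

	/* ...and undo the addition if the queue limit is exceeded. */
	if (bio->bi_phys_segments > queue_max_segments(q)) {
		bvec->bv_page = NULL;
		bio->bi_vcnt--;
		bio->bi_iter.bi_size -= len;
		return 0;
	}

	return len;
}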

This try-and-fix policy requires blk_recount_segments() (i.e.
__blk_recalc_rq_segments()) to ignore the segment count limit.
Unfortunately bvec_split_segs() does check this limit, so an artificially
small segment count is returned to bio_add_pc_page(), and the page may stay
in the bio even though the segment count limit has been exceeded.

Fix this issue by not considering the segment count limit when calculating
a bio's segment count.
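
To make the failure mode concrete, the standalone model below reproduces the
counting loop's behaviour with made-up numbers; it is an illustration, not
the kernel code. With a 128-segment queue limit, the capped loop reports 128
segments for a bvec that really needs 130, so bio_add_pc_page() sees a count
that does not exceed the limit and keeps the page. Counting against UINT_MAX
instead, as the recount path does after this patch, yields the true 130.

#include <limits.h>
#include <stdio.h>

/*
 * Standalone model of the bvec_split_segs() counting loop; the names and
 * numbers are illustrative only.
 */
static unsigned count_segs(unsigned len, unsigned max_seg_size,
			   unsigned nsegs, unsigned max_segs)
{
	unsigned new_nsegs = 0;

	/* Split the remaining length into segments, but stop at max_segs. */
	while (len && new_nsegs + nsegs < max_segs) {
		unsigned seg = len < max_seg_size ? len : max_seg_size;

		new_nsegs++;
		len -= seg;
	}
	return nsegs + new_nsegs;
}

int main(void)
{
	/* A bvec that really needs 130 x 4KB segments on a 128-segment queue. */
	printf("capped:   %u\n", count_segs(130 * 4096, 4096, 0, 128));      /* 128 */
	printf("uncapped: %u\n", count_segs(130 * 4096, 4096, 0, UINT_MAX)); /* 130 */
	return 0;
}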

Fixes: dcebd755926b ("block: use bio_for_each_bvec() to compute multi-page bvec count")
Cc: Christoph Hellwig <hch@lst.de>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-merge.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

Comments

Jens Axboe March 6, 2019, 4:43 p.m. UTC | #1
On 3/3/19 6:17 AM, Ming Lei wrote:
> blk_recount_segments() can be called from bio_add_pc_page() to calculate
> how many segments the bio will have after a page is added to it. If the
> resulting segment count exceeds the queue limit, the newly added page is
> removed again.
> 
> This try-and-fix policy requires blk_recount_segments() (i.e.
> __blk_recalc_rq_segments()) to ignore the segment count limit.
> Unfortunately bvec_split_segs() does check this limit, so an artificially
> small segment count is returned to bio_add_pc_page(), and the page may stay
> in the bio even though the segment count limit has been exceeded.
> 
> Fix this issue by not considering the segment count limit when calculating
> a bio's segment count.

Applied, thanks Ming.

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 22467f475ab4..1c9d4f0f96ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@  static unsigned get_max_segment_size(struct request_queue *q,
  */
 static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 		unsigned *nsegs, unsigned *last_seg_size,
-		unsigned *front_seg_size, unsigned *sectors)
+		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
@@ -190,7 +190,7 @@  static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 	 * Multi-page bvec may be too big to hold in one segment, so the
 	 * current bvec has to be splitted as multiple segments.
 	 */
-	while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+	while (len && new_nsegs + *nsegs < max_segs) {
 		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
 
@@ -240,6 +240,7 @@  static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	const unsigned max_segs = queue_max_segments(q);
 
 	bio_for_each_bvec(bv, bio, iter) {
 		/*
@@ -254,14 +255,14 @@  static struct bio *blk_bio_segment_split(struct request_queue *q,
 			 * Consider this a new segment if we're splitting in
 			 * the middle of this vector.
 			 */
-			if (nsegs < queue_max_segments(q) &&
+			if (nsegs < max_segs &&
 			    sectors < max_sectors) {
 				/* split in the middle of bvec */
 				bv.bv_len = (max_sectors - sectors) << 9;
 				bvec_split_segs(q, &bv, &nsegs,
 						&seg_size,
 						&front_seg_size,
-						&sectors);
+						&sectors, max_segs);
 			}
 			goto split;
 		}
@@ -283,7 +284,7 @@  static struct bio *blk_bio_segment_split(struct request_queue *q,
 			continue;
 		}
 new_segment:
-		if (nsegs == queue_max_segments(q))
+		if (nsegs == max_segs)
 			goto split;
 
 		bvprv = bv;
@@ -296,7 +297,7 @@  static struct bio *blk_bio_segment_split(struct request_queue *q,
 			if (nsegs == 1 && seg_size > front_seg_size)
 				front_seg_size = seg_size;
 		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
-				    &front_seg_size, &sectors)) {
+				    &front_seg_size, &sectors, max_segs)) {
 			goto split;
 		}
 	}
@@ -415,7 +416,7 @@  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			bvprv = bv;
 			prev = 1;
 			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
-					&front_seg_size, NULL);
+					&front_seg_size, NULL, UINT_MAX);
 		}
 		bbio = bio;
 	}