@@ -157,22 +157,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			bvprvp = &bvprv;
 			sectors += bv.bv_len >> 9;
 
-			if (nsegs == 1 && seg_size > front_seg_size)
-				front_seg_size = seg_size;
 			continue;
 		}
 new_segment:
 		if (nsegs == queue_max_segments(q))
 			goto split;
 
+		if (nsegs == 1 && seg_size > front_seg_size)
+			front_seg_size = seg_size;
+
 		nsegs++;
 		bvprv = bv;
 		bvprvp = &bvprv;
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
 
-		if (nsegs == 1 && seg_size > front_seg_size)
-			front_seg_size = seg_size;
 	}
 
 	do_split = false;
@@ -185,6 +184,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bio = new;
 	}
 
+	if (nsegs == 1 && seg_size > front_seg_size)
+		front_seg_size = seg_size;
 	bio->bi_seg_front_size = front_seg_size;
 	if (seg_size > bio->bi_seg_back_size)
 		bio->bi_seg_back_size = seg_size;
It is enough to check and compute bio->bi_seg_front_size just after the
1st segment is found, but the current code does that check for every
bvec, which is inefficient.

This patch follows the approach used in __blk_recalc_rq_segments() for
computing bio->bi_seg_front_size; it is more efficient and the code
becomes more readable as well.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 block/blk-merge.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
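The idea is easier to see outside the kernel. Below is a minimal, self-contained
sketch in plain C (not the kernel's blk_bio_segment_split(); the lens[] values,
MAX_SEG_SIZE and the merge rule are made up for illustration) of why refreshing
front_seg_size only when a new segment starts, plus one fix-up after the loop,
gives the same result as refreshing it on every bvec:

/*
 * Illustrative only: model a bio as a list of bvec lengths and build
 * segments greedily, recording the size of the first segment the way
 * the patched code does.
 */
#include <stdio.h>

#define MAX_SEG_SIZE 4096	/* stand-in for queue_max_segment_size(q) */

int main(void)
{
	unsigned int lens[] = { 1024, 1024, 4096, 512 };	/* fake bvec lengths */
	unsigned int nsegs = 0, seg_size = 0, front_seg_size = 0;
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		/* try to merge into the current segment first */
		if (nsegs && seg_size + lens[i] <= MAX_SEG_SIZE) {
			seg_size += lens[i];
			continue;
		}

		/*
		 * Starting a new segment: if the segment just closed was
		 * the first one, record its size now (mirrors the hunk
		 * moved under the new_segment label).
		 */
		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;

		nsegs++;
		seg_size = lens[i];
	}

	/* final fix-up, as added after the loop in the patch */
	if (nsegs == 1 && seg_size > front_seg_size)
		front_seg_size = seg_size;

	printf("nsegs=%u front_seg_size=%u last seg_size=%u\n",
	       nsegs, front_seg_size, seg_size);
	return 0;
}

The post-loop check matters because the in-loop update only fires when the first
segment is closed by the arrival of a second one; a bio that ends up with a
single segment would otherwise never record its front segment size.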