@@ -294,7 +294,8 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
- nsegs++;
+ /* single-page bvec optimization */
+ nsegs += blk_segments(lim, bv.bv_len);
bytes += bv.bv_len;
} else {
if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
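blk_segments() is new in this series and not shown in the excerpt. A minimal sketch that matches its use above, assuming the blk_queue_sub_page_limits() predicate discussed below, could look like this:

/*
 * Sketch of blk_segments(): how many hardware segments a buffer of
 * @len bytes occupies. Queues without sub-page limits keep the old
 * behavior of one segment per single-page bvec.
 */
static inline unsigned int blk_segments(const struct queue_limits *limits,
					unsigned int len)
{
	if (!blk_queue_sub_page_limits(limits) ||
	    len <= limits->max_segment_size)
		return 1;
	return DIV_ROUND_UP(len, limits->max_segment_size);
}

With max_segment_size == 512, for example, a 4 KiB single-page bvec then counts as eight segments instead of one.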
@@ -544,7 +545,10 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
__blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
goto next_bvec;

- if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+ if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE &&
+ (!blk_queue_sub_page_limits(&q->limits) ||
+ bvec.bv_len <= q->limits.max_segment_size))
+ /* single-segment bvec optimization */
nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
else
nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
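blk_queue_sub_page_limits() is likewise not defined in the excerpt; the name suggests a cheap predicate reporting whether any queue limit sits below the page size, so queues with ordinary limits skip the extra max_segment_size comparison. A plausible shape, with the sub_page_limits member being an assumption of this sketch:

/*
 * Sketch: report whether any queue limit is below the page size.
 * The sub_page_limits flag is hypothetical here; it would be set
 * when a driver configures e.g. max_segment_size < PAGE_SIZE.
 */
static inline bool blk_queue_sub_page_limits(const struct queue_limits *lim)
{
	return lim->sub_page_limits;
}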
@@ -2936,6 +2936,8 @@ void blk_mq_submit_bio(struct bio *bio)
bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
if (!bio)
return;
+ } else if (bio->bi_vcnt == 1) {
+ nr_segs = blk_segments(&q->limits, bio->bi_io_vec[0].bv_len);
}

if (!bio_integrity_prep(bio))
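The new else-if branch keeps nr_segs correct on the fast path: a single-bvec bio that skips __bio_split_to_limits() would otherwise be accounted as one segment even when sub-page limits spread its payload across several. A self-contained userspace demonstration of the arithmetic (illustrative values, same formula as the blk_segments() sketch above):

#include <stdio.h>

static unsigned int segments(unsigned int max_segment_size, unsigned int len)
{
	return len <= max_segment_size ?
		1 : (len + max_segment_size - 1) / max_segment_size;
}

int main(void)
{
	/* One 4 KiB bvec on a queue limited to 512-byte segments. */
	printf("nr_segs = %u\n", segments(512, 4096)); /* nr_segs = 8 */
	return 0;
}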
@@ -332,13 +332,12 @@ static inline bool bio_may_exceed_limits(struct bio *bio,
}

/*
- * All drivers must accept single-segment bios that are <= PAGE_SIZE.
- * This is a quick and dirty check that relies on the fact that
- * bi_io_vec[0] is always valid if a bio has data. The check might
- * lead to occasional false negatives when bios are cloned, but compared
- * to the performance impact of cloned bios themselves the loop below
- * doesn't matter anyway.
+ * Check whether bio splitting should be performed. This check may
+ * trigger the bio splitting code even if splitting is not necessary.
*/
+ if (blk_queue_sub_page_limits(lim) && bio->bi_io_vec &&
+ bio->bi_io_vec->bv_len > lim->max_segment_size)
+ return true;
return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
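The early return makes bio_may_exceed_limits() deliberately conservative, as the reworded comment notes: with sub-page limits enabled, any first bvec larger than max_segment_size is routed into the splitting code, which then decides whether a split is actually needed. For this check to stay O(1), the sub-page state has to be precomputed when the limits are configured; a sketch of that bookkeeping, with the sub_page_limits member again an assumption of this sketch rather than part of the patch:

/*
 * Sketch: maintain the (assumed) sub_page_limits flag at the time a
 * driver updates its segment-size limit, so the hot-path checks above
 * reduce to a single flag test.
 */
static inline void blk_set_max_segment_size(struct queue_limits *lim,
					    unsigned int max_segment_size)
{
	lim->max_segment_size = max_segment_size;
	if (max_segment_size < PAGE_SIZE)
		lim->sub_page_limits = true;	/* hypothetical member */
}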