@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
- bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+ bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
nsegs++;
bytes += bv.bv_len;
} else {
@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
+ unsigned long seg_size;
int err;
/*
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
+ if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
/*
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
+ if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}
+ /* set up min segment size for building a new segment in the fast path */
+ if (lim->seg_boundary_mask > lim->max_segment_size - 1)
+ seg_size = lim->max_segment_size;
+ else
+ seg_size = lim->seg_boundary_mask + 1;
+ lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
+
/*
* We require drivers to at least do logical block aligned I/O, but
* historically could not check for that due to the separate calls
@@ -358,8 +358,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
static inline bool bio_may_need_split(struct bio *bio,
const struct queue_limits *lim)
{
- return lim->chunk_sectors || bio->bi_vcnt != 1 ||
- bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+ if (lim->chunk_sectors)
+ return true;
+ if (bio->bi_vcnt != 1)
+ return true;
+ return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
+ lim->min_segment_size;
}
/**
@@ -367,6 +367,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int min_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
unsigned int alignment_offset;
@@ -1163,6 +1164,8 @@ static inline bool bdev_is_partition(struct block_device *bdev)
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
+ /* use the smallest supported PAGE_SIZE (4K) as the min segment size hint */
+ BLK_MIN_SEGMENT_SIZE = 4096,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
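/*
 * A minimal, self-contained sketch of the min_segment_size computation that
 * blk_validate_limits() performs in the hunks above, for illustration only;
 * it is not part of the patch. The single-bvec fast path in
 * bio_may_need_split()/bio_split_rw_at() then compares against this
 * precomputed value instead of hard-coding PAGE_SIZE, which keeps the check
 * valid on kernels built with pages larger than 4K, while the
 * BLK_MIN_SEGMENT_SIZE floor lets devices with segment limits smaller than a
 * large page pass validation. The helper name blk_min_segment_size() and the
 * 64K PAGE_SIZE value below are assumptions chosen for the example, not
 * taken from the kernel tree.
 */
#include <stdio.h>

#define PAGE_SIZE	65536UL		/* assume a 64K-page kernel build */

/* Smaller of the segment boundary span, the max segment size, and PAGE_SIZE. */
static unsigned long blk_min_segment_size(unsigned long seg_boundary_mask,
					  unsigned long max_segment_size)
{
	unsigned long seg_size;

	if (seg_boundary_mask > max_segment_size - 1)
		seg_size = max_segment_size;
	else
		seg_size = seg_boundary_mask + 1;

	return seg_size < PAGE_SIZE ? seg_size : PAGE_SIZE;
}

int main(void)
{
	/* Device limited to 4K segments: the fast path keeps using 4K. */
	printf("%lu\n", blk_min_segment_size(0xFFFFFFFFUL, 4096UL));	/* 4096 */
	/* 64K segment limit with a 64K boundary: a full page fits one segment. */
	printf("%lu\n", blk_min_segment_size(0xFFFFUL, 65536UL));	/* 65536 */
	return 0;
}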