Message ID | 20220406060516.409838-23-hch@lst.de (mailing list archive) |
---|---|
State | Superseded, archived |
Series | [01/27] target: remove an incorrect unmap zeroes data deduction |
Christoph,

> Move all the logic to limit the discard bio size into a common helper
> so that it is better documented.

Looks OK.

Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
On 4/6/22 2:05 PM, Christoph Hellwig wrote:
> Move all the logic to limit the discard bio size into a common helper
> so that it is better documented.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Acked-by: Coly Li <colyli@suse.de>

Thanks for the change.

Coly Li
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 237d60d8b5857..2ae32a722851c 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -10,6 +10,32 @@
 
 #include "blk.h"
 
+static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
+{
+	unsigned int discard_granularity =
+		bdev_get_queue(bdev)->limits.discard_granularity;
+	sector_t granularity_aligned_sector;
+
+	if (bdev_is_partition(bdev))
+		sector += bdev->bd_start_sect;
+
+	granularity_aligned_sector =
+		round_up(sector, discard_granularity >> SECTOR_SHIFT);
+
+	/*
+	 * Make sure subsequent bios start aligned to the discard granularity if
+	 * it needs to be split.
+	 */
+	if (granularity_aligned_sector != sector)
+		return granularity_aligned_sector - sector;
+
+	/*
+	 * Align the bio size to the discard granularity to make splitting the bio
+	 * at discard granularity boundaries easier in the driver if needed.
+	 */
+	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
+}
+
 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, int flags,
 		struct bio **biop)
@@ -17,7 +43,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
 	unsigned int op;
-	sector_t bs_mask, part_offset = 0;
+	sector_t bs_mask;
 
 	if (bdev_read_only(bdev))
 		return -EPERM;
@@ -48,36 +74,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!nr_sects)
 		return -EINVAL;
 
-	/* In case the discard request is in a partition */
-	if (bdev_is_partition(bdev))
-		part_offset = bdev->bd_start_sect;
-
 	while (nr_sects) {
-		sector_t granularity_aligned_lba, req_sects;
-		sector_t sector_mapped = sector + part_offset;
-
-		granularity_aligned_lba = round_up(sector_mapped,
-			q->limits.discard_granularity >> SECTOR_SHIFT);
-
-		/*
-		 * Check whether the discard bio starts at a discard_granularity
-		 * aligned LBA,
-		 * - If no: set (granularity_aligned_lba - sector_mapped) to
-		 *   bi_size of the first split bio, then the second bio will
-		 *   start at a discard_granularity aligned LBA on the device.
-		 * - If yes: use bio_aligned_discard_max_sectors() as the max
-		 *   possible bi_size of the first split bio. Then when this bio
-		 *   is split in device drive, the split ones are very probably
-		 *   to be aligned to discard_granularity of the device's queue.
-		 */
-		if (granularity_aligned_lba == sector_mapped)
-			req_sects = min_t(sector_t, nr_sects,
-				bio_aligned_discard_max_sectors(q));
-		else
-			req_sects = min_t(sector_t, nr_sects,
-				granularity_aligned_lba - sector_mapped);
-
-		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
+		sector_t req_sects =
+			min(nr_sects, bio_discard_limit(bdev, sector));
 
 		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
diff --git a/block/blk.h b/block/blk.h
index 8ccbc6e076369..1fdc1d28e6d60 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -346,20 +346,6 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
 	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
 }
 
-/*
- * The max bio size which is aligned to q->limits.discard_granularity. This
- * is a hint to split large discard bio in generic block layer, then if device
- * driver needs to split the discard bio into smaller ones, their bi_size can
- * be very probably and easily aligned to discard_granularity of the device's
- * queue.
- */
-static inline unsigned int bio_aligned_discard_max_sectors(
-	struct request_queue *q)
-{
-	return round_down(UINT_MAX, q->limits.discard_granularity) >>
-		SECTOR_SHIFT;
-}
-
 /*
  * Internal io_context interface
  */
Move all the logic to limit the discard bio size into a common helper
so that it is better documented.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-lib.c | 59 ++++++++++++++++++++++++-------------------------
 block/blk.h     | 14 ------------
 2 files changed, 29 insertions(+), 44 deletions(-)
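
For illustration, here is a small user-space sketch of the arithmetic that the new bio_discard_limit() helper performs. It is not kernel code: SECTOR_SHIFT, ROUND_UP(), ROUND_DOWN() and the 1 MiB discard granularity are local stand-ins chosen for this example, and the partition-offset adjustment (sector += bdev->bd_start_sect) is left out because the sketch treats the sector as already device-absolute.

/*
 * User-space model of the bio_discard_limit() arithmetic; the names and
 * values below are stand-ins for this example only.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT	9

/* Round to a multiple of a power-of-two alignment, kernel-macro style. */
#define ROUND_UP(x, a)		((((uint64_t)(x)) + ((a) - 1)) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a)	(((uint64_t)(x)) & ~((uint64_t)(a) - 1))

/* Hypothetical device limit: 1 MiB discard granularity, in bytes. */
#define DISCARD_GRANULARITY	(1024 * 1024)

/*
 * How many sectors the next discard bio may cover when it starts at
 * 'sector' (device-absolute), mirroring the shape of bio_discard_limit().
 */
static uint64_t discard_limit(uint64_t sector)
{
	uint64_t granularity_sectors = DISCARD_GRANULARITY >> SECTOR_SHIFT;
	uint64_t aligned = ROUND_UP(sector, granularity_sectors);

	/*
	 * Unaligned start: cap the first bio so that the next one begins
	 * on a granularity boundary.
	 */
	if (aligned != sector)
		return aligned - sector;

	/*
	 * Aligned start: the largest granularity-aligned size that still
	 * fits in a 32-bit bi_size.
	 */
	return ROUND_DOWN(UINT32_MAX, DISCARD_GRANULARITY) >> SECTOR_SHIFT;
}

int main(void)
{
	/* Discard 5000 sectors starting at the unaligned sector 3000. */
	uint64_t sector = 3000, nr_sects = 5000;

	while (nr_sects) {
		uint64_t limit = discard_limit(sector);
		uint64_t req_sects = nr_sects < limit ? nr_sects : limit;

		printf("bio: sector %llu, %llu sectors\n",
		       (unsigned long long)sector,
		       (unsigned long long)req_sects);
		sector += req_sects;
		nr_sects -= req_sects;
	}
	return 0;
}

Compiled and run, this splits the 5000-sector discard starting at sector 3000 into a 1096-sector bio followed by a 3904-sector bio that begins on a 2048-sector granularity boundary, which is the same splitting behaviour the removed open-coded loop in __blkdev_issue_discard() implemented.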