@@ -353,7 +353,8 @@ void bio_chain(struct bio *bio, struct bio *parent)
EXPORT_SYMBOL(bio_chain);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
- unsigned int nr_pages, unsigned int opf, gfp_t gfp)
+ sector_t sect, unsigned int nr_pages,
+ unsigned int opf, gfp_t gfp)
{
struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
@@ -362,6 +363,8 @@ struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
submit_bio(bio);
}
+ new->bi_iter.bi_sector = sect;
+
return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);
@@ -79,8 +79,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
- bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
- bio->bi_iter.bi_sector = sector;
+ bio = blk_next_bio(bio, bdev, sector, 0, op, gfp_mask);
bio->bi_iter.bi_size = req_sects << 9;
sector += req_sects;
nr_sects -= req_sects;
@@ -167,8 +166,8 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
- bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
- bio->bi_iter.bi_sector = sector;
+ bio = blk_next_bio(bio, bdev, sector, 1, REQ_OP_WRITE_SAME,
+ gfp_mask);
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -224,6 +223,8 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop, unsigned flags)
{
+ unsigned int opf = flags & BLKDEV_ZERO_NOUNMAP ? REQ_NOUNMAP : 0;
+ unsigned int op = REQ_OP_WRITE_ZEROES;
struct bio *bio = *biop;
unsigned int max_write_zeroes_sectors;
@@ -237,10 +238,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP;
while (nr_sects) {
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
- bio->bi_iter.bi_sector = sector;
- if (flags & BLKDEV_ZERO_NOUNMAP)
- bio->bi_opf |= REQ_NOUNMAP;
+ bio = blk_next_bio(bio, bdev, sector, 0, op | opf, gfp_mask);
if (nr_sects > max_write_zeroes_sectors) {
bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
@@ -274,6 +272,7 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop)
{
+ unsigned int nr_pages = __blkdev_sectors_to_bio_pages(nr_sects);
struct bio *bio = *biop;
int bi_size = 0;
unsigned int sz;
@@ -282,9 +281,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM;
while (nr_sects != 0) {
- bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
- REQ_OP_WRITE, gfp_mask);
- bio->bi_iter.bi_sector = sector;
+ bio = blk_next_bio(bio, bdev, sector, nr_pages, REQ_OP_WRITE,
+ gfp_mask);
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
@@ -215,9 +215,8 @@ static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
continue;
}
- bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
- gfp_mask);
- bio->bi_iter.bi_sector = sector;
+ bio = blk_next_bio(bio, bdev, sector, 0,
+ REQ_OP_ZONE_RESET | REQ_SYNC, gfp_mask);
sector += zone_sectors;
/* This may take a while, so be nice to others */
@@ -302,8 +301,8 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
}
while (sector < end_sector) {
- bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
- bio->bi_iter.bi_sector = sector;
+ bio = blk_next_bio(bio, bdev, sector, 0, op | REQ_SYNC,
+ gfp_mask);
sector += zone_sectors;
/* This may take a while, so be nice to others */
@@ -412,10 +412,9 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
while (sector < get_capacity(bdev->bd_disk)) {
if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
- bio = blk_next_bio(bio, bdev, 0,
+ bio = blk_next_bio(bio, bdev, sector, 0,
zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
GFP_KERNEL);
- bio->bi_iter.bi_sector = sector;
/* This may take a while, so be nice to others */
cond_resched();
}
@@ -788,6 +788,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
- unsigned int nr_pages, unsigned int opf, gfp_t gfp);
+ sector_t sect, unsigned int nr_pages,
+ unsigned int opf, gfp_t gfp);
#endif /* __LINUX_BIO_H */
blk_next_bio() is the central function which allocates bios for e.g. discard, write-same, write-zeroes, zone-mgmt and initializes common members of bios to avoid code duplication. The initialization of the sector bio member is duplicated in discard, write-same, write-zeroes, the NVMeOF Target zns backend, etc. The callers of blk_next_bio() — __blkdev_issue_discard(), __blkdev_issue_write_same(), __blkdev_issue_write_zeroes(), __blkdev_issue_zero_pages(), blkdev_zone_reset_all_emulated(), blkdev_zone_mgmt() and nvmet_bdev_zone_mgmt_emulate_all() — all initialize bio->bi_iter.bi_sector right after the call to blk_next_bio(), so we can safely move the newly returned bio's sector initialization into blk_next_bio() itself. Move the sector member into blk_next_bio() just like the other common members were moved there. Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com> --- block/bio.c | 5 ++++- block/blk-lib.c | 20 +++++++++----------- block/blk-zoned.c | 9 ++++----- drivers/nvme/target/zns.c | 3 +-- include/linux/bio.h | 3 ++- 5 files changed, 20 insertions(+), 20 deletions(-)