
[1/1] block: move sector bio member into blk_next_bio()

Message ID 20220301022310.8579-2-kch@nvidia.com (mailing list archive)
State New, archived
Series [1/1] block: move sector bio member into blk_next_bio()

Commit Message

Chaitanya Kulkarni March 1, 2022, 2:23 a.m. UTC
blk_next_bio() is the central function that allocates bios for e.g.
discard, write-same, write-zeroes and zone management, and initializes
their common members to avoid code duplication. The initialization of
the bio's sector member, however, is still duplicated in the discard,
write-same and write-zeroes paths and in the NVMeOF target ZNS backend.

The callers of blk_next_bio(), namely __blkdev_issue_discard(),
__blkdev_issue_write_same(), __blkdev_issue_write_zeroes(),
__blkdev_issue_zero_pages(), blkdev_zone_reset_all_emulated(),
blkdev_zone_mgmt() and nvmet_bdev_zone_mgmt_emulate_all(), all
initialize bio->bi_iter.bi_sector right after the call to
blk_next_bio(), so the sector initialization of the newly returned bio
can safely be moved into blk_next_bio().

Move the sector member into blk_next_bio(), just as the other common
members have already been moved there.
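
For reference, the shape of the change at a typical call site, taken
from the discard path in the diff below:

	/* Before: the caller sets the start sector by hand after the allocation. */
	bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
	bio->bi_iter.bi_sector = sector;

	/* After: the start sector is passed in and set inside blk_next_bio(). */
	bio = blk_next_bio(bio, bdev, sector, 0, op, gfp_mask);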

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 block/bio.c               |  5 ++++-
 block/blk-lib.c           | 20 +++++++++-----------
 block/blk-zoned.c         |  9 ++++-----
 drivers/nvme/target/zns.c |  3 +--
 include/linux/bio.h       |  3 ++-
 5 files changed, 20 insertions(+), 20 deletions(-)

Comments

Christoph Hellwig March 1, 2022, 11:25 a.m. UTC | #1
It would be nice to just calculate the sector from the previous
bio using bio_end_sector, but that doesn't really work with the
current blk_next_bio pattern.  It might make sense to just take the
initial allocation case out of blk_next_bio and do that manually now
that the bio_alloc interface makes a lot more sense.
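
For illustration only, a rough sketch of what that alternative could
look like, assuming the bio_alloc()/bio_chain()/bio_end_sector()
interfaces used elsewhere in this series; sketch_issue_write_zeroes()
and its overall structure are made up here and are not part of the
patch or of this review:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical sketch, not a proposed patch: the first bio is allocated
 * by hand, and each subsequent bio derives its start sector from the
 * previous one via bio_end_sector(), which works because the previous
 * bio's size is final by the time the next bio is allocated.
 */
static int sketch_issue_write_zeroes(struct block_device *bdev,
				     sector_t sector, sector_t nr_sects,
				     gfp_t gfp_mask)
{
	unsigned int max_sectors = bdev_write_zeroes_sectors(bdev);
	struct bio *bio, *next;

	if (!max_sectors)
		return -EOPNOTSUPP;

	/* Initial allocation done by the caller, not by a helper. */
	bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
	bio->bi_iter.bi_sector = sector;

	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio->bi_iter.bi_size = len << 9;
		nr_sects -= len;
		if (!nr_sects)
			break;

		/* The previous bio's size is final, so chain the next one off it. */
		next = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		next->bi_iter.bi_sector = bio_end_sector(bio);
		bio_chain(bio, next);
		submit_bio(bio);
		bio = next;
	}

	return submit_bio_wait(bio);
}
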
Chaitanya Kulkarni March 8, 2022, 11:19 p.m. UTC | #2
On 3/1/22 03:25, Christoph Hellwig wrote:
> It would be nice to just calculate the sector from the previous
> bio using bio_end_sector, but that doesn't really work with the
> current blk_next_bio pattern.  It might make sense to just take the
> initial allocation case out of blk_next_bio and do that manually now
> that the bio_alloc interface makes a lot more sense.
> 

I find the existing pattern easier to read with blk_next_bio(): removing
blk_next_bio() would duplicate the allocation code it currently avoids,
unless I have misunderstood your comment.

-ck

Patch

diff --git a/block/bio.c b/block/bio.c
index b15f5466ce08..0a68091685ee 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -353,7 +353,8 @@  void bio_chain(struct bio *bio, struct bio *parent)
 EXPORT_SYMBOL(bio_chain);
 
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
-		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
+			 sector_t sect, unsigned int nr_pages,
+			 unsigned int opf, gfp_t gfp)
 {
 	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
 
@@ -362,6 +363,8 @@  struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
 		submit_bio(bio);
 	}
 
+	new->bi_iter.bi_sector = sect;
+
 	return new;
 }
 EXPORT_SYMBOL_GPL(blk_next_bio);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index fc6ea52e7482..3407b1afc079 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -79,8 +79,7 @@  int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 
-		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
+		bio = blk_next_bio(bio, bdev, sector, 0, op, gfp_mask);
 		bio->bi_iter.bi_size = req_sects << 9;
 		sector += req_sects;
 		nr_sects -= req_sects;
@@ -167,8 +166,8 @@  static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	max_write_same_sectors = bio_allowed_max_sectors(q);
 
 	while (nr_sects) {
-		bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
+		bio = blk_next_bio(bio, bdev, sector, 1, REQ_OP_WRITE_SAME,
+				   gfp_mask);
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -224,6 +223,8 @@  static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
 		struct bio **biop, unsigned flags)
 {
+	unsigned int opf = flags & BLKDEV_ZERO_NOUNMAP ? REQ_NOUNMAP : 0;
+	unsigned int op = REQ_OP_WRITE_ZEROES;
 	struct bio *bio = *biop;
 	unsigned int max_write_zeroes_sectors;
 
@@ -237,10 +238,7 @@  static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 		return -EOPNOTSUPP;
 
 	while (nr_sects) {
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
-		if (flags & BLKDEV_ZERO_NOUNMAP)
-			bio->bi_opf |= REQ_NOUNMAP;
+		bio = blk_next_bio(bio, bdev, sector, 0, op | opf, gfp_mask);
 
 		if (nr_sects > max_write_zeroes_sectors) {
 			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
@@ -274,6 +272,7 @@  static int __blkdev_issue_zero_pages(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
 		struct bio **biop)
 {
+	unsigned int nr_pages = __blkdev_sectors_to_bio_pages(nr_sects);
 	struct bio *bio = *biop;
 	int bi_size = 0;
 	unsigned int sz;
@@ -282,9 +281,8 @@  static int __blkdev_issue_zero_pages(struct block_device *bdev,
 		return -EPERM;
 
 	while (nr_sects != 0) {
-		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
-				   REQ_OP_WRITE, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
+		bio = blk_next_bio(bio, bdev, sector, nr_pages, REQ_OP_WRITE,
+				   gfp_mask);
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 602bef54c813..0a8680df3f1c 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -215,9 +215,8 @@  static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
 			continue;
 		}
 
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
-				   gfp_mask);
-		bio->bi_iter.bi_sector = sector;
+		bio = blk_next_bio(bio, bdev, sector, 0,
+				   REQ_OP_ZONE_RESET | REQ_SYNC, gfp_mask);
 		sector += zone_sectors;
 
 		/* This may take a while, so be nice to others */
@@ -302,8 +301,8 @@  int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 	}
 
 	while (sector < end_sector) {
-		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
+		bio = blk_next_bio(bio, bdev, sector, 0, op | REQ_SYNC,
+				   gfp_mask);
 		sector += zone_sectors;
 
 		/* This may take a while, so be nice to others */
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 3e421217a7ad..cb4ba3f3a95e 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -412,10 +412,9 @@  static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
 
 	while (sector < get_capacity(bdev->bd_disk)) {
 		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
-			bio = blk_next_bio(bio, bdev, 0,
+			bio = blk_next_bio(bio, bdev, sector, 0,
 				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
 				GFP_KERNEL);
-			bio->bi_iter.bi_sector = sector;
 			/* This may take a while, so be nice to others */
 			cond_resched();
 		}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7523aba4ddf7..c34fd40c8867 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -788,6 +788,7 @@  static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 }
 
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
-		unsigned int nr_pages, unsigned int opf, gfp_t gfp);
+			 sector_t sect, unsigned int nr_pages,
+			 unsigned int opf, gfp_t gfp);
 
 #endif /* __LINUX_BIO_H */