[v4,7/8] block: add nowait flag for __blkdev_issue_zero_pages

Message ID 387caa9b7a23061f19034a8afd1dfcf017d8fe35.1725621577.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Series implement async block discards and other ops via io_uring

Commit Message

Pavel Begunkov Sept. 6, 2024, 10:57 p.m. UTC
To reuse __blkdev_issue_zero_pages() in the following patch, it needs to
work with non-blocking requests. Add a new nowait flag that callers can
pass in. Return an error when something goes wrong, and check bio_alloc()
for failure, which previously could not happen given the gfp flags the
callers pass. Note that a bio may still be passed back even when the
function returns an error. To limit the scope of the patch, don't add
return-code handling to the callers; that can be deferred to a follow-up.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/blk-lib.c        | 24 +++++++++++++++++++-----
 include/linux/bio.h    |  4 ++++
 include/linux/blkdev.h |  1 +
 3 files changed, 24 insertions(+), 5 deletions(-)
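
[Editor's note, not part of the patch] A minimal sketch of the resulting
calling convention under BLKDEV_ZERO_PAGES_NOWAIT. The helper name and the
synchronous submit_bio_wait() completion are assumptions made for the
example, mirroring the in-file caller blkdev_issue_zero_pages(); the next
patch in the series presumably drives completion asynchronously from
io_uring instead.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller, for illustration only. */
static int zero_range_nowait_example(struct block_device *bdev,
				     sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = blkdev_issue_zero_pages_bio(bdev, sector, nr_sects, GFP_NOWAIT,
					  &bio, BLKDEV_ZERO_PAGES_NOWAIT);
	/*
	 * Per the commit message, a bio may have been passed back even when
	 * an error is returned, so the chain still has to be submitted and
	 * dropped before acting on ret.
	 */
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	/*
	 * -EAGAIN from the call itself means the range did not fit in a
	 * single bio; with REQ_NOWAIT set, blocking conditions surface as an
	 * error on bio completion instead.
	 */
	return ret;
}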

Patch

diff --git a/block/blk-lib.c b/block/blk-lib.c
index c94c67a75f7e..0d8f1b93b4c3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -193,20 +193,32 @@  static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
 	return min(pages, (sector_t)BIO_MAX_VECS);
 }
 
-static void __blkdev_issue_zero_pages(struct block_device *bdev,
+int blkdev_issue_zero_pages_bio(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
 		struct bio **biop, unsigned int flags)
 {
+	blk_opf_t opf = REQ_OP_WRITE;
+
+	if (flags & BLKDEV_ZERO_PAGES_NOWAIT) {
+		sector_t max_bio_sectors = BIO_MAX_VECS << PAGE_SECTORS_SHIFT;
+
+		if (nr_sects > max_bio_sectors)
+			return -EAGAIN;
+		opf |= REQ_NOWAIT;
+	}
+
 	while (nr_sects) {
 		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
 		struct bio *bio;
 
-		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
+		bio = bio_alloc(bdev, nr_vecs, opf, gfp_mask);
+		if (!bio)
+			return -ENOMEM;
 		bio->bi_iter.bi_sector = sector;
 
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
 		    fatal_signal_pending(current))
-			break;
+			return -EINTR;
 
 		do {
 			unsigned int len, added;
@@ -223,6 +235,8 @@  static void __blkdev_issue_zero_pages(struct block_device *bdev,
 		*biop = bio_chain_and_submit(*biop, bio);
 		cond_resched();
 	}
+
+	return 0;
 }
 
 static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
@@ -236,7 +250,7 @@  static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
 		return -EOPNOTSUPP;
 
 	blk_start_plug(&plug);
-	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
+	blkdev_issue_zero_pages_bio(bdev, sector, nr_sects, gfp, &bio, flags);
 	if (bio) {
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
 		    fatal_signal_pending(current)) {
@@ -286,7 +300,7 @@  int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	} else {
 		if (flags & BLKDEV_ZERO_NOFALLBACK)
 			return -EOPNOTSUPP;
-		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+		blkdev_issue_zero_pages_bio(bdev, sector, nr_sects, gfp_mask,
 				biop, flags);
 	}
 	return 0;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 78ead424484c..87d85b326e1e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -686,4 +686,8 @@  struct bio *blk_alloc_discard_bio(struct block_device *bdev,
 
 sector_t bio_discard_limit(struct block_device *bdev, sector_t sector);
 
+int blkdev_issue_zero_pages_bio(struct block_device *bdev,
+		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+		struct bio **biop, unsigned int flags);
+
 #endif /* __LINUX_BIO_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 643c9020a35a..bf1aa951fda2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1098,6 +1098,7 @@  int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 #define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
 #define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */
 #define BLKDEV_ZERO_KILLABLE	(1 << 2)  /* interruptible by fatal signals */
+#define BLKDEV_ZERO_PAGES_NOWAIT (1 << 3) /* non-blocking submission  */
 
 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,