@@ -192,20 +192,32 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
 	return min(pages, (sector_t)BIO_MAX_VECS);
 }
 
-static void __blkdev_issue_zero_pages(struct block_device *bdev,
+int blkdev_issue_zero_pages_bio(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
 		struct bio **biop, unsigned int flags)
 {
+	blk_opf_t opf = REQ_OP_WRITE;
+
+	if (flags & BLKDEV_ZERO_PAGES_NOWAIT) {
+		sector_t max_bio_sectors = BIO_MAX_VECS << PAGE_SECTORS_SHIFT;
+
+		if (nr_sects > max_bio_sectors)
+			return -EAGAIN;
+		opf |= REQ_NOWAIT;
+	}
+
 	while (nr_sects) {
 		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
 		struct bio *bio;
 
-		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
+		bio = bio_alloc(bdev, nr_vecs, opf, gfp_mask);
+		if (!bio)
+			return -ENOMEM;
 		bio->bi_iter.bi_sector = sector;
 
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
 		    fatal_signal_pending(current))
-			break;
+			return -EINTR;
 
 		do {
 			unsigned int len, added;
@@ -222,6 +234,8 @@ static void __blkdev_issue_zero_pages(struct block_device *bdev,
*biop = bio_chain_and_submit(*biop, bio);
cond_resched();
}
+
+ return 0;
}
static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
@@ -235,7 +249,7 @@ static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
return -EOPNOTSUPP;
blk_start_plug(&plug);
- __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
+ blkdev_issue_zero_pages_bio(bdev, sector, nr_sects, gfp, &bio, flags);
if (bio) {
if ((flags & BLKDEV_ZERO_KILLABLE) &&
fatal_signal_pending(current)) {
@@ -285,7 +299,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
} else {
if (flags & BLKDEV_ZERO_NOFALLBACK)
return -EOPNOTSUPP;
- __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+ blkdev_issue_zero_pages_bio(bdev, sector, nr_sects, gfp_mask,
biop, flags);
}
return 0;
@@ -684,4 +684,8 @@ struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
+int blkdev_issue_zero_pages_bio(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+ struct bio **biop, unsigned int flags);
+
#endif /* __LINUX_BIO_H */
@@ -1098,6 +1098,7 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
+#define BLKDEV_ZERO_PAGES_NOWAIT (1 << 3) /* non-blocking submission */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
To reuse __blkdev_issue_zero_pages() in the following patch, we need to make it work with non-blocking requests. Add a new nowait flag we can pass inside. Return errors if something went wrong, and check bio_alloc() for failures, which wasn't supposed to happen before because of what gfp flags the callers are passing. Note that there might be a bio passed back even when the function returned an error. To limit the scope of the patch, don't add return code handling to callers, that can be deferred to a follow up. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> --- block/blk-lib.c | 22 ++++++++++++++++++---- include/linux/bio.h | 4 ++++ include/linux/blkdev.h | 1 + 3 files changed, 23 insertions(+), 4 deletions(-)