@@ -772,6 +772,67 @@ static void bio_cmd_bio_end_io(struct bio *bio)
bio_put(bio);
}
+static int blkdev_cmd_write_zeroes(struct io_uring_cmd *cmd,
+				   struct block_device *bdev,
+				   uint64_t start, uint64_t len, bool nowait)
+{
+
+	sector_t bs_mask = (bdev_logical_block_size(bdev) >> SECTOR_SHIFT) - 1;
+	sector_t limit = bdev_write_zeroes_sectors(bdev);
+	sector_t sector = start >> SECTOR_SHIFT;
+	sector_t nr_sects = len >> SECTOR_SHIFT;
+	struct bio *prev = NULL, *bio;
+	gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
+	int err;
+
+	if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
+		return -EBADF;
+	if (bdev_read_only(bdev))
+		return -EPERM;
+	err = blk_validate_byte_range(bdev, start, len);
+	if (err)
+		return err;
+
+	if (!limit)
+		return -EOPNOTSUPP;
+	/*
+	 * Don't allow multi-bio non-blocking submissions as subsequent bios
+	 * may fail but we won't get a direct indication of that. Normally,
+	 * the caller should retry from a blocking context.
+	 */
+	if (nowait && nr_sects > limit)
+		return -EAGAIN;
+
+	err = filemap_invalidate_pages(bdev->bd_mapping, start,
+				       start + len - 1, nowait);
+	if (err)
+		return err;
+
+	limit = min(limit, (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+	while (nr_sects) {
+		sector_t bio_sects = min(nr_sects, limit);
+
+		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES|REQ_NOUNMAP, gfp);
+		if (!bio)
+			break;
+		if (nowait)
+			bio->bi_opf |= REQ_NOWAIT;
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+		sector += bio_sects;
+		nr_sects -= bio_sects;
+
+		prev = bio_chain_and_submit(prev, bio);
+	}
+	if (!prev)
+		return -EAGAIN;
+
+	prev->bi_private = cmd;
+	prev->bi_end_io = bio_cmd_bio_end_io;
+	submit_bio(prev);
+	return -EIOCBQUEUED;
+}
+
static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
struct block_device *bdev,
uint64_t start, uint64_t len, bool nowait)
@@ -841,6 +902,9 @@ int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
switch (cmd_op) {
case BLOCK_URING_CMD_DISCARD:
return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
+	case BLOCK_URING_CMD_WRITE_ZEROES:
+		return blkdev_cmd_write_zeroes(cmd, bdev, start, len,
+					       bic->nowait);
}
return -EINVAL;
}
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -209,6 +209,7 @@ struct fsxattr {
*/
#define BLOCK_URING_CMD_DISCARD _IO(0x12,137)
+#define BLOCK_URING_CMD_WRITE_ZEROES _IO(0x12,138)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
Add another io_uring cmd for the block layer implementing asynchronous
write zeroes. It reuses the helpers added for async discards and
inherits the code structure as well as all considerations regarding
page cache races.

Suggested-by: Conrad Meyer <conradmeyer@meta.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/ioctl.c           | 64 +++++++++++++++++++++++++++++++++++++++++
 include/uapi/linux/fs.h |  1 +
 2 files changed, 65 insertions(+)
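
For context, here is a minimal userspace sketch (not part of the patch) of how
the new command could be driven through liburing. It assumes the SQE layout
used by the existing BLOCK_URING_CMD_DISCARD command from this series (byte
offset in sqe->addr, byte length in sqe->addr3); the helper name
write_zeroes_async() is made up for illustration.

	/*
	 * Hypothetical usage sketch, not part of the patch. Assumes the
	 * BLOCK_URING_CMD_DISCARD SQE convention from the same series:
	 * byte offset in sqe->addr, byte length in sqe->addr3.
	 */
	#include <liburing.h>
	#include <linux/fs.h>		/* BLOCK_URING_CMD_WRITE_ZEROES */
	#include <stdint.h>

	static int write_zeroes_async(int bdev_fd, uint64_t offset, uint64_t nbytes)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		ret = io_uring_queue_init(8, &ring, 0);
		if (ret < 0)
			return ret;

		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			ret = -EAGAIN;
			goto out;
		}
		/* Plain IORING_OP_URING_CMD against the block device fd. */
		io_uring_prep_rw(IORING_OP_URING_CMD, sqe, bdev_fd, NULL, 0, 0);
		sqe->cmd_op = BLOCK_URING_CMD_WRITE_ZEROES;
		sqe->addr = offset;	/* assumed: byte range start */
		sqe->addr3 = nbytes;	/* assumed: byte range length */

		ret = io_uring_submit(&ring);
		if (ret < 0)
			goto out;

		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			goto out;
		/* cqe->res is 0 on success or a negative errno. */
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	out:
		io_uring_queue_exit(&ring);
		return ret;
	}

As with the discard command, a -EAGAIN completion from a nowait attempt is
expected to be retried by io_uring from a blocking context.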