@@ -18,6 +18,9 @@
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
#include <linux/cdev.h>
+#include <linux/io_uring/cmd.h>
+#include <linux/types.h>
+#include <linux/uio.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16
@@ -54,6 +57,20 @@ static struct class *vd_chr_class;
static struct workqueue_struct *virtblk_wq;
+struct virtblk_uring_cmd_pdu {
+	struct request *req;	/* in-flight passthrough request */
+	struct bio *bio;	/* saved so completion can blk_rq_unmap_user() */
+	int status;	/* NOTE(review): never written in this patch — confirm intent */
+};
+/* Kernel-side copy of the uapi struct virtblk_uring_cmd payload. */
+struct virtblk_command {
+	struct virtio_blk_outhdr out_hdr;
+	/* user buffer address and byte length taken from the SQE */
+	__u64 data;
+	__u32 data_len;
+	__u32 flag;	/* currently unused by the driver */
+};
+
struct virtio_blk_vq {
struct virtqueue *vq;
spinlock_t lock;
@@ -122,6 +139,16 @@ struct virtblk_req {
struct scatterlist sg[];
};
+static bool virtblk_is_write(struct virtblk_command *cmd)
+{
+	return cmd->out_hdr.type & VIRTIO_BLK_T_OUT;	/* NOTE(review): type is already virtio-endian here; confirm bit test on legacy BE */
+}
+/* Convert a u64 carried in the SQE back into a user-space pointer. */
+static void __user *virtblk_to_user_ptr(uintptr_t ptrval)
+{
+	return (void __user *)ptrval;
+}
+
static inline blk_status_t virtblk_result(u8 status)
{
switch (status) {
@@ -259,9 +286,6 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
return BLK_STS_NOTSUPP;
- /* Set fields for all request types */
- vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
-
switch (req_op(req)) {
case REQ_OP_READ:
type = VIRTIO_BLK_T_IN;
@@ -309,9 +333,11 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
/*
* Out header has already been prepared by the caller (virtblk_get_id()
- * or virtblk_submit_zone_report()), nothing to do here.
+	 * virtblk_submit_zone_report(), or io_uring passthrough cmd), nothing
+ * to do here.
*/
return 0;
default:
@@ -323,6 +349,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
vbr->in_hdr_len = in_hdr_len;
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
type == VIRTIO_BLK_T_SECURE_ERASE) {
@@ -832,6 +859,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr = blk_mq_rq_to_pdu(req);
vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
vbr->out_hdr.sector = 0;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
@@ -1250,6 +1278,197 @@ static const struct blk_mq_ops virtio_mq_ops = {
.poll = virtblk_poll,
};
+static inline struct virtblk_uring_cmd_pdu *virtblk_get_uring_cmd_pdu(
+	struct io_uring_cmd *ioucmd)
+{
+	return (struct virtblk_uring_cmd_pdu *)&ioucmd->pdu;	/* fits: BUILD_BUG_ON() in virtblk_uring_cmd() */
+}
+
+static void virtblk_uring_task_cb(struct io_uring_cmd *ioucmd,
+		unsigned int issue_flags)
+{
+	struct virtblk_uring_cmd_pdu *pdu = virtblk_get_uring_cmd_pdu(ioucmd);
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(pdu->req);
+	u64 result = 0;
+	/* drop the user-page mapping before posting the CQE */
+	if (pdu->bio)
+		blk_rq_unmap_user(pdu->bio);
+
+	/* currently result has no use, it should be zero as cqe->res */
+	io_uring_cmd_done(ioucmd, vbr->in_hdr.status, result, issue_flags);
+}
+
+static enum rq_end_io_ret virtblk_uring_cmd_end_io(struct request *req,
+				blk_status_t err)
+{
+	struct io_uring_cmd *ioucmd = req->end_io_data;
+	struct virtblk_uring_cmd_pdu *pdu = virtblk_get_uring_cmd_pdu(ioucmd);
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	/*
+	 * For iopoll, complete directly with the virtio status byte from
+	 * the request pdu (pdu->status is never written anywhere, so it
+	 * must not be reported).  The uring_cmd iopoll helper is safe
+	 * here only because blk_rq_is_poll() is checked first.
+	 * Otherwise, move the completion to task work.
+	 */
+	if (blk_rq_is_poll(req)) {
+		if (pdu->bio)
+			blk_rq_unmap_user(pdu->bio);
+		io_uring_cmd_iopoll_done(ioucmd, 0, vbr->in_hdr.status);
+	} else {
+		io_uring_cmd_do_in_task_lazy(ioucmd, virtblk_uring_task_cb);
+	}
+
+	return RQ_END_IO_FREE;
+}
+
+static struct virtblk_req *virtblk_req(struct request *req)
+{
+	return blk_mq_rq_to_pdu(req);	/* vbr lives in the request pdu */
+}
+/* Choose the driver-private op matching the command's data direction. */
+static enum req_op virtblk_req_op(struct virtblk_command *cmd)
+{
+	return virtblk_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
+static struct request *virtblk_alloc_user_request(
+	struct request_queue *q, struct virtblk_command *cmd,
+	unsigned int rq_flags, blk_mq_req_flags_t blk_flags)
+{
+	struct request *req;
+
+	req = blk_mq_alloc_request(q, virtblk_req_op(cmd) | rq_flags, blk_flags);
+	if (IS_ERR(req))
+		return req;
+	/* out_hdr comes from the caller; virtblk_setup_cmd() skips DRV_IN/OUT */
+	req->rq_flags |= RQF_DONTPREP;
+	memcpy(&virtblk_req(req)->out_hdr, &cmd->out_hdr, sizeof(struct virtio_blk_outhdr));
+	return req;
+}
+
+static int virtblk_map_user_request(struct request *req, u64 ubuffer,
+		unsigned int bufflen, struct io_uring_cmd *ioucmd,
+		bool vec)
+{
+	struct request_queue *q = req->q;
+	struct virtio_blk *vblk = q->queuedata;
+	struct block_device *bdev = vblk ? vblk->disk->part0 : NULL;
+	struct bio *bio = NULL;
+	int ret;
+	/* registered (fixed) io_uring buffers take the import path */
+	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+		struct iov_iter iter;
+
+		/* fixedbufs is only for non-vectored io */
+		if (WARN_ON_ONCE(vec)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+				rq_data_dir(req), &iter, ioucmd);
+		if (ret < 0)
+			goto out;
+		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+	} else {
+		ret = blk_rq_map_user_io(req, NULL,
+				virtblk_to_user_ptr(ubuffer),
+				bufflen, GFP_KERNEL, vec, 0,
+				0, rq_data_dir(req));
+	}
+	if (ret)
+		goto out;
+
+	bio = req->bio;
+	if (bdev)
+		bio_set_dev(bio, bdev);
+	return 0;
+
+out:
+	/* error contract: the request is always consumed here */
+	blk_mq_free_request(req);
+	return ret;
+}
+
+static int virtblk_uring_cmd_io(struct virtio_blk *vblk,
+		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
+{
+	struct virtblk_uring_cmd_pdu *pdu = virtblk_get_uring_cmd_pdu(ioucmd);
+	const struct virtblk_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
+	struct request_queue *q = vblk->disk->queue;
+	struct virtblk_req *vbr;
+	struct virtblk_command d;
+	struct request *req;
+	unsigned int rq_flags = 0;
+	blk_mq_req_flags_t blk_flags = 0;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	d.out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, READ_ONCE(cmd->ioprio));
+	d.out_hdr.type = cpu_to_virtio32(vblk->vdev, READ_ONCE(cmd->type));
+	/* sector is 64 bits on the wire; a 32-bit conversion truncates it */
+	d.out_hdr.sector = cpu_to_virtio64(vblk->vdev, READ_ONCE(cmd->sector));
+	d.data = READ_ONCE(cmd->data);
+	d.data_len = READ_ONCE(cmd->data_len);
+	if (issue_flags & IO_URING_F_NONBLOCK) {
+		rq_flags = REQ_NOWAIT;
+		blk_flags = BLK_MQ_REQ_NOWAIT;
+	}
+	if (issue_flags & IO_URING_F_IOPOLL)
+		rq_flags |= REQ_POLLED;
+
+	req = virtblk_alloc_user_request(q, &d, rq_flags, blk_flags);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	vbr = virtblk_req(req);
+	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
+	if (d.data && d.data_len) {
+		ret = virtblk_map_user_request(req, d.data, d.data_len, ioucmd, vec);
+		if (ret)
+			return ret;	/* req was freed by virtblk_map_user_request() */
+	}
+
+	/* to free bio on completion, as req->bio will be null at that time */
+	pdu->bio = req->bio;
+	pdu->req = req;
+	req->end_io_data = ioucmd;
+	req->end_io = virtblk_uring_cmd_end_io;
+	blk_execute_rq_nowait(req, false);
+	return -EIOCBQUEUED;
+}
+
+
+static int virtblk_uring_cmd(struct virtio_blk *vblk, struct io_uring_cmd *ioucmd,
+		unsigned int issue_flags)
+{
+	int ret;
+	/* the pdu state must fit inside the uring_cmd inline pdu area */
+	BUILD_BUG_ON(sizeof(struct virtblk_uring_cmd_pdu) > sizeof(ioucmd->pdu));
+
+	switch (ioucmd->cmd_op) {
+	case VIRTBLK_URING_CMD_IO:
+		ret = virtblk_uring_cmd_io(vblk, ioucmd, issue_flags, false);
+		break;
+	case VIRTBLK_URING_CMD_IO_VEC:	/* vectored (iovec) variant */
+		ret = virtblk_uring_cmd_io(vblk, ioucmd, issue_flags, true);
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static int virtblk_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+	struct virtio_blk *vblk = container_of(file_inode(ioucmd->file)->i_cdev,
+			struct virtio_blk, cdev);
+	/* ->uring_cmd entry for the /dev/vdXcN char device */
+	return virtblk_uring_cmd(vblk, ioucmd, issue_flags);
+}
+
static void virtblk_cdev_rel(struct device *dev)
{
ida_free(&vd_chr_minor_ida, MINOR(dev->devt));
@@ -1297,6 +1516,7 @@ static int virtblk_cdev_add(struct virtio_blk *vblk,
static const struct file_operations virtblk_chr_fops = {
.owner = THIS_MODULE,
+ .uring_cmd = virtblk_chr_uring_cmd,
};
static unsigned int virtblk_queue_depth;
@@ -313,6 +313,22 @@ struct virtio_scsi_inhdr {
};
#endif /* !VIRTIO_BLK_NO_LEGACY */
+struct virtblk_uring_cmd {
+	/* VIRTIO_BLK_T* */
+	__u32 type;
+	/* io priority. */
+	__u32 ioprio;
+	/* Sector (ie. 512 byte offset) */
+	__u64 sector;
+	/* user buffer address and length in bytes */
+	__u64 data;
+	__u32 data_len;
+	__u32 flag;	/* reserved; NOTE(review): not validated by the driver — confirm */
+};
+/* io_uring cmd_op opcodes accepted by the virtio-blk char device */
+#define VIRTBLK_URING_CMD_IO	1
+#define VIRTBLK_URING_CMD_IO_VEC	2
+
/* And this is the final byte of the write scatter-gather list. */
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
Add ->uring_cmd() support for the virtio-blk chardev (/dev/vdXc0). According to the virtio spec, in addition to passing the 'hdr' info into the kernel, we also need to pass the vaddr and data length of the 'iov' required for the writev/readv op. Signed-off-by: Ferry Meng <mengferry@linux.alibaba.com> --- drivers/block/virtio_blk.c | 228 +++++++++++++++++++++++++++++++- include/uapi/linux/virtio_blk.h | 16 +++ 2 files changed, 240 insertions(+), 4 deletions(-)