@@ -1271,6 +1271,9 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
@@ -2169,6 +2172,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
struct ublk_device *ub = NULL;
int ret = -EINVAL;
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
@@ -30,6 +30,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/io_uring.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
@@ -482,6 +483,9 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -773,6 +773,9 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
struct nvme_ns, cdev);
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}
@@ -878,6 +881,9 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
struct nvme_ns *ns = nvme_find_path(head);
int ret = -EINVAL;
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
if (ns)
ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
srcu_read_unlock(&head->srcu, srcu_idx);
@@ -915,6 +921,9 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ctrl *ctrl = ioucmd->file->private_data;
int ret;
+ if (issue_flags & IO_URING_F_FUSED)
+ return -EOPNOTSUPP;
+
/* IOPOLL not supported yet */
if (issue_flags & IO_URING_F_IOPOLL)
return -EOPNOTSUPP;
@@ -20,6 +20,13 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = (1 << 8),
IO_URING_F_CQE32 = (1 << 9),
IO_URING_F_IOPOLL = (1 << 10),
+
+ /* for FUSED_CMD only */
+ IO_URING_F_FUSED_WRITE = (1 << 11), /* slave writes to buffer */
+ IO_URING_F_FUSED_READ = (1 << 12), /* slave reads from buffer */
+ /* a driver incapable of FUSED_CMD must fail commands that carry F_FUSED */
+ IO_URING_F_FUSED = IO_URING_F_FUSED_WRITE |
+ IO_URING_F_FUSED_READ,
};
struct io_uring_cmd {
Add flag IO_URING_F_FUSED and prepare for supporting IO_URING_OP_FUSED_CMD, which is still one type of IO_URING_OP_URING_CMD, so it is reasonable to reuse ->uring_cmd() for handling IO_URING_F_FUSED_CMD. Just IO_URING_F_FUSED_CMD will carry one 64byte SQE as payload which is handled by one slave request. The master uring command will provide kernel buffer to the slave request. Mark all existed drivers to not support IO_URING_F_FUSED_CMD, given it depends if driver is capable of handling the slave request. Signed-off-by: Ming Lei <ming.lei@redhat.com> --- drivers/block/ublk_drv.c | 6 ++++++ drivers/char/mem.c | 4 ++++ drivers/nvme/host/ioctl.c | 9 +++++++++ include/linux/io_uring.h | 7 +++++++ 4 files changed, 26 insertions(+)