--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -16,6 +16,7 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = 4,
IO_URING_F_CQE32 = 8,
IO_URING_F_IOPOLL = 16,
+ IO_URING_F_FIXEDBUFS = 32,
};
struct io_uring_cmd {
@@ -28,7 +29,7 @@ struct io_uring_cmd {
void *cookie;
};
u32 cmd_op;
- u32 pad;
+ u32 flags;
u8 pdu[32]; /* available inline for free use */
};
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -56,6 +56,7 @@ struct io_uring_sqe {
__u32 hardlink_flags;
__u32 xattr_flags;
__u32 msg_ring_flags;
+ __u32 uring_cmd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -219,6 +220,14 @@ enum io_uring_op {
IORING_OP_LAST,
};
+/*
+ * sqe->uring_cmd_flags
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
+ * along with setting sqe->buf_index.
+ */
+#define IORING_URING_CMD_FIXED (1U << 0)
+
+
/*
* sqe->fsync_flags
*/
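For context, userspace opts in per SQE by setting the new flag together with a slot in the registered buffer table. A minimal liburing-based sketch (not part of the patch; the cmd_op value and the single-buffer registration are illustrative assumptions, and the installed uapi headers must already carry this patch's uring_cmd_flags field):

    #include <liburing.h>
    #include <string.h>

    /* Submit an IORING_OP_URING_CMD that uses a registered buffer. */
    static int submit_fixed_cmd(struct io_uring *ring, int fd,
                                void *buf, size_t len)
    {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct io_uring_sqe *sqe;

        /* One-time setup: buf becomes slot 0 of the buffer table. */
        if (io_uring_register_buffers(ring, &iov, 1))
            return -1;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
            return -1;
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = fd;
        sqe->cmd_op = 0x42;    /* driver-private opcode (assumption) */
        sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
        sqe->buf_index = 0;    /* slot in the registered buffer table */
        return io_uring_submit(ring);
    }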
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -3,6 +3,7 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/nospec.h>
#include <uapi/linux/io_uring.h>
@@ -76,8 +77,23 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- if (sqe->rw_flags || sqe->__pad1)
+ if (sqe->__pad1)
return -EINVAL;
+
+ ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
+ if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+ return -EINVAL;
+ req->buf_index = READ_ONCE(sqe->buf_index);
+ if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+ struct io_ring_ctx *ctx = req->ctx;
+ u16 index;
+
+ if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ return -EFAULT;
+ index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+ req->imu = ctx->user_bufs[index];
+ io_req_set_rsrc_node(req, ctx, 0);
+ }
ioucmd->cmd = sqe->cmd;
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
return 0;
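Because buf_index comes straight from userspace, prep bounds-checks it against nr_user_bufs and clamps it with array_index_nospec() before the user_bufs[] dereference (hence the new nospec.h include); a mispredicted bounds check then cannot speculatively read past the end of the table. io_req_set_rsrc_node() pins the resource node so the registered buffer cannot be unregistered while the request is in flight. The clamp follows the usual Spectre-v1 pattern, shown standalone for reference (generic sketch, not part of the patch):

    #include <linux/nospec.h>

    /* Bounds-check, then clamp idx so it stays in [0, nr) even under
     * speculative execution of a mispredicted branch. */
    static void *table_lookup(void **table, unsigned int nr, unsigned int idx)
    {
        if (idx >= nr)
            return NULL;
        idx = array_index_nospec(idx, nr);
        return table[idx];
    }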
@@ -102,6 +118,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
req->iopoll_completed = 0;
WRITE_ONCE(ioucmd->cookie, NULL);
}
+ if (ioucmd->flags & IORING_URING_CMD_FIXED)
+ issue_flags |= IO_URING_F_FIXEDBUFS;
if (req_has_async_data(req))
ioucmd->cmd = req->async_data;
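With IO_URING_F_FIXEDBUFS plumbed into issue_flags, a ->uring_cmd() handler can detect fixed-buffer requests and build an iov_iter over the registered buffer without walking user page tables per I/O. A rough sketch of a consumer, assuming the io_uring_cmd_import_fixed() helper added by the companion patch in this series; the my_cmd PDU layout and the WRITE data direction are illustrative assumptions:

    #include <linux/errno.h>
    #include <linux/io_uring.h>
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/uio.h>

    /* Hypothetical driver-private command carried in the SQE PDU. */
    struct my_cmd {
        __u64 addr;    /* user address the buffer was registered at */
        __u32 len;
    };

    static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
    {
        struct my_cmd *cmd = (struct my_cmd *)ioucmd->pdu;
        struct iov_iter iter;
        int ret;

        if (issue_flags & IO_URING_F_FIXEDBUFS) {
            /* Reuses req->imu cached at prep time; no per-I/O
             * get_user_pages() for pre-registered buffers. */
            ret = io_uring_cmd_import_fixed(cmd->addr, cmd->len,
                                            WRITE, &iter, ioucmd);
            if (ret < 0)
                return ret;
        }
        /* ... map &iter for DMA, then complete asynchronously via
         * io_uring_cmd_done() ... */
        return -EIOCBQUEUED;
    }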