@@ -716,6 +716,14 @@ struct io_sr_msg {
unsigned int flags;
};
+struct io_sendzc {
+ struct file *file;
+ void __user *buf;
+ size_t len;
+ u16 slot_idx;
+ int msg_flags;
+};
+
struct io_open {
struct file *file;
int dfd;
@@ -1044,6 +1052,7 @@ struct io_kiocb {
struct io_socket sock;
struct io_nop nop;
struct io_uring_cmd uring_cmd;
+ struct io_sendzc msgzc;
};
u8 opcode;
@@ -1384,6 +1393,13 @@ static const struct io_op_def io_op_defs[] = {
.needs_async_setup = 1,
.async_size = uring_cmd_pdu_size(1),
},
+ [IORING_OP_SENDZC] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollout = 1,
+ .audit_skip = 1,
+ .ioprio = 1,
+ },
};
/* requests with any of those set should undergo io_disarm_next() */
@@ -1525,6 +1541,8 @@ const char *io_uring_get_opcode(u8 opcode)
return "SOCKET";
case IORING_OP_URING_CMD:
return "URING_CMD";
+ case IORING_OP_SENDZC:
+ return "URING_SENDZC";
case IORING_OP_LAST:
return "INVALID";
}
@@ -2920,7 +2938,6 @@ static struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
return notif;
}
-__attribute__((unused))
static inline struct io_notif *io_get_notif(struct io_ring_ctx *ctx,
struct io_notif_slot *slot)
{
@@ -2929,7 +2946,6 @@ static inline struct io_notif *io_get_notif(struct io_ring_ctx *ctx,
return slot->notif;
}
-__attribute__((unused))
static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
int idx)
__must_hold(&ctx->uring_lock)
@@ -6546,6 +6562,83 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
+static int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_sendzc *zc = &req->msgzc;
+
+ if (READ_ONCE(sqe->ioprio) || READ_ONCE(sqe->addr2) || READ_ONCE(sqe->__pad2[0]))
+ return -EINVAL;
+
+ zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ zc->len = READ_ONCE(sqe->len);
+ zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
+ zc->slot_idx = READ_ONCE(sqe->notification_idx);
+ if (zc->msg_flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ zc->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+ return 0;
+}
+
+static int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_sendzc *zc = &req->msgzc;
+ struct io_notif_slot *notif_slot;
+ struct io_notif *notif;
+ struct msghdr msg;
+ struct iovec iov;
+ struct socket *sock;
+ unsigned msg_flags;
+ int ret, min_ret = 0;
+
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return -EAGAIN;
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
+ if (!notif_slot)
+ return -EINVAL;
+ notif = io_get_notif(ctx, notif_slot);
+ if (!notif)
+ return -ENOMEM;
+
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+ msg.msg_managed_data = 0;
+
+ ret = import_single_range(WRITE, zc->buf, zc->len, &iov, &msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+
+ msg_flags = zc->msg_flags | MSG_ZEROCOPY;
+ if (issue_flags & IO_URING_F_NONBLOCK)
+ msg_flags |= MSG_DONTWAIT;
+ if (msg_flags & MSG_WAITALL)
+ min_ret = iov_iter_count(&msg.msg_iter);
+
+ msg.msg_flags = msg_flags;
+ msg.msg_ubuf = &notif->uarg;
+ ret = sock_sendmsg(sock, &msg);
+
+ if (unlikely(ret < min_ret)) {
+ if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ req_set_fail(req);
+ }
+
+ __io_req_complete(req, issue_flags, ret, 0);
+ return 0;
+}
+
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
@@ -7064,6 +7157,7 @@ IO_NETOP_PREP_ASYNC(connect);
IO_NETOP_PREP(accept);
IO_NETOP_PREP(socket);
IO_NETOP_PREP(shutdown);
+IO_NETOP_PREP(sendzc);
IO_NETOP_FN(send);
IO_NETOP_FN(recv);
#endif /* CONFIG_NET */
@@ -8389,6 +8483,8 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
return io_sendmsg_prep(req, sqe);
+ case IORING_OP_SENDZC:
+ return io_sendzc_prep(req, sqe);
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
return io_recvmsg_prep(req, sqe);
@@ -8689,6 +8785,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
case IORING_OP_SEND:
ret = io_send(req, issue_flags);
break;
+ case IORING_OP_SENDZC:
+ ret = io_sendzc(req, issue_flags);
+ break;
case IORING_OP_RECVMSG:
ret = io_recvmsg(req, issue_flags);
break;
@@ -61,6 +61,10 @@ struct io_uring_sqe {
union {
__s32 splice_fd_in;
__u32 file_index;
+ struct {
+ __u16 notification_idx;
+ __u16 __pad;
+ } __attribute__((packed));
};
union {
struct {
@@ -190,6 +194,7 @@ enum io_uring_op {
IORING_OP_GETXATTR,
IORING_OP_SOCKET,
IORING_OP_URING_CMD,
+ IORING_OP_SENDZC,
/* this goes last, obviously */
IORING_OP_LAST,
Add a new io_uring opcode IORING_OP_SENDZC. The main distinction from
IORING_OP_SEND is that the user should specify a notification slot index in
sqe::notification_idx and the buffers are safe to reuse only when the used
notification is flushed and completes.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c                 | 103 +++++++++++++++++++++++++++++++++-
 include/uapi/linux/io_uring.h |   5 ++
 2 files changed, 106 insertions(+), 2 deletions(-)
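For illustration, a minimal userspace sketch of how such a request might be
queued with liburing's raw SQE helpers. This is an assumption-laden example,
not part of the patch: it presumes headers carrying the IORING_OP_SENDZC
opcode and the sqe->notification_idx field from this series, and that a
notification slot (index 0 here) has already been registered through the
series' separate registration interface, which is not shown.

/*
 * Hypothetical usage sketch only: IORING_OP_SENDZC and sqe->notification_idx
 * come from this patch series, and notification slot 0 is assumed to have
 * been registered beforehand.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <stddef.h>

static int queue_sendzc(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;

	/* Same fd/addr/len layout as IORING_OP_SEND. */
	io_uring_prep_rw(IORING_OP_SENDZC, sqe, sockfd, buf, len, 0);
	sqe->msg_flags = MSG_NOSIGNAL;
	/* Which registered notification slot will report buffer reuse. */
	sqe->notification_idx = 0;

	/*
	 * The request's own CQE only signals that the data was handed to the
	 * socket; 'buf' may be reused once the notification bound to slot 0
	 * is flushed and its completion has been reaped.
	 */
	return io_uring_submit(ring);
}

The deferred-reuse model mirrors MSG_ZEROCOPY, which reports buffer
completions via the socket error queue; here the report arrives instead as a
CQE generated when the bound notification slot is flushed.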