@@ -272,6 +272,13 @@ enum io_uring_op {
*/
#define IORING_ACCEPT_MULTISHOT (1U << 0)
+/*
+ * IORING_OP_SENDZC flags
+ */
+enum {
+ IORING_SENDZC_FIXED_BUF = (1U << 0),
+};
+
/*
* IO completion data structure (Completion Queue Entry)
*/
@@ -14,6 +14,7 @@
#include "kbuf.h"
#include "net.h"
#include "notif.h"
+#include "rsrc.h"
#if defined(CONFIG_NET)
struct io_shutdown {
@@ -65,6 +66,7 @@ struct io_sendzc {
size_t len;
u16 slot_idx;
int msg_flags;
+ unsigned zc_flags;
int addr_len;
void __user *addr;
};
@@ -782,11 +784,14 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
+#define IO_SENDZC_VALID_FLAGS IORING_SENDZC_FIXED_BUF
+
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sendzc *zc = io_kiocb_to_cmd(req);
+ struct io_ring_ctx *ctx = req->ctx;
- if (READ_ONCE(sqe->ioprio) || READ_ONCE(sqe->__pad2[0]))
+ if (READ_ONCE(sqe->__pad2[0]))
return -EINVAL;
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -799,6 +804,20 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
+ zc->zc_flags = READ_ONCE(sqe->ioprio);
+ if (zc->zc_flags & ~IO_SENDZC_VALID_FLAGS)
+ return -EINVAL;
+
+ if (zc->zc_flags & IORING_SENDZC_FIXED_BUF) {
+ unsigned idx = READ_ONCE(sqe->buf_index);
+
+ if (unlikely(idx >= ctx->nr_user_bufs))
+ return -EFAULT;
+ idx = array_index_nospec(idx, ctx->nr_user_bufs);
+ req->imu = READ_ONCE(ctx->user_bufs[idx]);
+ io_req_set_rsrc_node(req, ctx, 0);
+ }
+
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
zc->msg_flags |= MSG_CMSG_COMPAT;
@@ -836,12 +855,21 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- msg.msg_managed_data = 0;
+ msg.msg_managed_data = 1;
- ret = import_single_range(WRITE, zc->buf, zc->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- return ret;
- mm_account_pinned_pages(&notif->uarg.mmp, zc->len);
+ if (zc->zc_flags & IORING_SENDZC_FIXED_BUF) {
+ ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
+ (u64)zc->buf, zc->len);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ msg.msg_managed_data = 0;
+ ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
+ &msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ mm_account_pinned_pages(&notif->uarg.mmp, zc->len);
+ }
if (zc->addr) {
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
Allow zerocopy sends to use fixed buffers. There is an optimisation for
this case: the network layer doesn't need to reference the pages (see
SKBFL_MANAGED_FRAG_REFS), so io_uring has to ensure the validity of the
fixed buffers until the notifier is released.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/uapi/linux/io_uring.h |  7 ++++++
 io_uring/net.c                | 40 +++++++++++++++++++++++++++++------
 2 files changed, 41 insertions(+), 6 deletions(-)
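[Editor's illustration, not part of the patch] A minimal liburing-based sketch of how userspace might pair a registered buffer with the new flag. The opcode spelling IORING_OP_SENDZC (taken from the comment in the uapi hunk), the helper name send_one_fixed_zc(), and the use of buffer index 0 are assumptions for the example; the notification-slot setup that this series also requires is omitted.

/*
 * Illustrative sketch only. Assumes liburing headers that expose the
 * zerocopy send opcode (spelled IORING_OP_SENDZC here, per the uapi
 * comment above) and IORING_SENDZC_FIXED_BUF. Notification slot
 * registration, which this series also needs, is omitted.
 */
#include <liburing.h>
#include <errno.h>
#include <string.h>

static int send_one_fixed_zc(struct io_uring *ring, int sock_fd,
			     void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe;
	int ret;

	/* Pin the buffer up front; it becomes fixed buffer index 0. */
	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EAGAIN;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_SENDZC;
	sqe->fd = sock_fd;
	sqe->addr = (unsigned long)buf;	/* must fall within the registered buffer */
	sqe->len = len;
	sqe->ioprio = IORING_SENDZC_FIXED_BUF;	/* zc_flags travel in sqe->ioprio */
	sqe->buf_index = 0;			/* index into registered buffers */

	return io_uring_submit(ring);
}

As the commit message notes, with SKBFL_MANAGED_FRAG_REFS the network stack takes no page references for these frags, so the buffer must stay registered (and the rsrc node held, as done in io_sendzc_prep()) until the notification completion arrives.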