
[9/9] io_uring/epoll: add multishot support for IORING_OP_EPOLL_WAIT

Message ID 20250203163114.124077-10-axboe@kernel.dk
State New
Series io_uring epoll wait support

Commit Message

Jens Axboe Feb. 3, 2025, 4:23 p.m. UTC
As with other multishot requests, submitting a multishot epoll wait
request keeps it armed after the initial trigger. This allows multiple
epoll wait completions per submitted request, posted every time events
become available. If more completions are expected for this epoll wait
request, IORING_CQE_F_MORE will be set in the posted cqe->flags.
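
For illustration, a minimal completion-handling sketch with liburing;
"ring", "events" and handle_events() are assumptions for the example,
not part of this patch. While IORING_CQE_F_MORE is set, the request
stays armed and will post further CQEs without being resubmitted:

	#include <liburing.h>
	#include <stdbool.h>
	#include <sys/epoll.h>

	static void reap_epoll_cqes(struct io_uring *ring,
				    struct epoll_event *events)
	{
		struct io_uring_cqe *cqe;

		while (!io_uring_wait_cqe(ring, &cqe)) {
			bool more = cqe->flags & IORING_CQE_F_MORE;

			if (cqe->res > 0)
				/* cqe->res events were copied to the user buffer */
				handle_events(events, cqe->res); /* hypothetical consumer */
			io_uring_cqe_seen(ring, cqe);
			if (!more)
				break;	/* terminal CQE, request is done */
		}
	}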

For multishot, the request remains on the epoll callback waitqueue
head. This means that epoll doesn't need to juggle the ep->lock
writelock (and disable/enable IRQs) for each invocation of the
reaping loop. That should translate into nice efficiency gains.

Use it by setting IORING_EPOLL_WAIT_MULTISHOT in the sqe->epoll_flags
member.
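
A minimal submission sketch, assuming liburing built against a uapi
header that already carries this series; no dedicated prep helper is
assumed, so the sqe is filled in by hand:

	#include <liburing.h>
	#include <errno.h>
	#include <string.h>
	#include <sys/epoll.h>

	static int arm_multishot_epoll_wait(struct io_uring *ring, int epfd,
					    struct epoll_event *events,
					    int maxevents)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			return -EBUSY;
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_EPOLL_WAIT;
		sqe->fd = epfd;				/* epoll instance fd */
		sqe->addr = (unsigned long) events;	/* user epoll_event array */
		sqe->len = maxevents;
		sqe->epoll_flags = IORING_EPOLL_WAIT_MULTISHOT;
		return io_uring_submit(ring);
	}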

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/uapi/linux/io_uring.h |  6 ++++++
 io_uring/epoll.c              | 40 ++++++++++++++++++++++++++---------
 2 files changed, 36 insertions(+), 10 deletions(-)

Patch

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a559e1e1544a..93f504b6d4ec 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -73,6 +73,7 @@  struct io_uring_sqe {
 		__u32		futex_flags;
 		__u32		install_fd_flags;
 		__u32		nop_flags;
+		__u32		epoll_flags;
 	};
 	__u64	user_data;	/* data to be passed back at completion time */
 	/* pack this to avoid bogus arm OABI complaints */
@@ -405,6 +406,11 @@  enum io_uring_op {
 #define IORING_ACCEPT_DONTWAIT	(1U << 1)
 #define IORING_ACCEPT_POLL_FIRST	(1U << 2)
 
+/*
+ * epoll_wait flags, stored in sqe->epoll_flags
+ */
+#define IORING_EPOLL_WAIT_MULTISHOT	(1U << 0)
+
 /*
  * IORING_OP_MSG_RING command types, stored in sqe->addr
  */
diff --git a/io_uring/epoll.c b/io_uring/epoll.c
index 2a9c679516c8..730f4b729f5b 100644
--- a/io_uring/epoll.c
+++ b/io_uring/epoll.c
@@ -24,6 +24,7 @@  struct io_epoll {
 struct io_epoll_wait {
 	struct file			*file;
 	int				maxevents;
+	int				flags;
 	struct epoll_event __user	*events;
 	struct wait_queue_entry		wait;
 };
@@ -145,12 +146,15 @@  static void io_epoll_retry(struct io_kiocb *req, struct io_tw_state *ts)
 	io_req_task_submit(req, ts);
 }
 
-static int io_epoll_execute(struct io_kiocb *req)
+static int io_epoll_execute(struct io_kiocb *req, __poll_t mask)
 {
 	struct io_epoll_wait *iew = io_kiocb_to_cmd(req, struct io_epoll_wait);
 
 	if (io_poll_get_ownership(req)) {
-		list_del_init_careful(&iew->wait.entry);
+		if (mask & EPOLL_URING_WAKE)
+			req->flags &= ~REQ_F_APOLL_MULTISHOT;
+		if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+			list_del_init_careful(&iew->wait.entry);
 		req->io_task_work.func = io_epoll_retry;
 		io_req_task_work_add(req);
 		return 1;
@@ -159,13 +163,13 @@  static int io_epoll_execute(struct io_kiocb *req)
 	return 0;
 }
 
-static __cold int io_epoll_pollfree_wake(struct io_kiocb *req)
+static __cold int io_epoll_pollfree_wake(struct io_kiocb *req, __poll_t mask)
 {
 	struct io_epoll_wait *iew = io_kiocb_to_cmd(req, struct io_epoll_wait);
 
 	io_poll_mark_cancelled(req);
 	list_del_init_careful(&iew->wait.entry);
-	io_epoll_execute(req);
+	io_epoll_execute(req, mask);
 	return 1;
 }
 
@@ -176,18 +180,23 @@  static int io_epoll_wait_fn(struct wait_queue_entry *wait, unsigned mode,
 	__poll_t mask = key_to_poll(key);
 
 	if (unlikely(mask & POLLFREE))
-		return io_epoll_pollfree_wake(req);
+		return io_epoll_pollfree_wake(req, mask);
 
-	return io_epoll_execute(req);
+	return io_epoll_execute(req, mask);
 }
 
 int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_epoll_wait *iew = io_kiocb_to_cmd(req, struct io_epoll_wait);
 
-	if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+	if (sqe->off || sqe->buf_index || sqe->splice_fd_in)
 		return -EINVAL;
 
+	iew->flags = READ_ONCE(sqe->epoll_flags);
+	if (iew->flags & ~IORING_EPOLL_WAIT_MULTISHOT)
+		return -EINVAL;
+	else if (iew->flags & IORING_EPOLL_WAIT_MULTISHOT)
+		req->flags |= REQ_F_APOLL_MULTISHOT;
 	iew->maxevents = READ_ONCE(sqe->len);
 	iew->events = u64_to_user_ptr(READ_ONCE(sqe->addr));
 
@@ -195,6 +204,7 @@  int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	iew->wait.private = req;
 	iew->wait.func = io_epoll_wait_fn;
 	INIT_LIST_HEAD(&iew->wait.entry);
+	INIT_HLIST_NODE(&req->hash_node);
 	atomic_set(&req->poll_refs, 0);
 	return 0;
 }
@@ -205,9 +215,11 @@  int io_epoll_wait(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	io_ring_submit_lock(ctx, issue_flags);
-	hlist_add_head(&req->hash_node, &ctx->epoll_list);
-	io_ring_submit_unlock(ctx, issue_flags);
+	if (hlist_unhashed(&req->hash_node)) {
+		io_ring_submit_lock(ctx, issue_flags);
+		hlist_add_head(&req->hash_node, &ctx->epoll_list);
+		io_ring_submit_unlock(ctx, issue_flags);
+	}
 
 	/*
 	 * Timeout is fake here, it doesn't indicate any kind of sleep time.
@@ -219,9 +231,17 @@  int io_epoll_wait(struct io_kiocb *req, unsigned int issue_flags)
 		return IOU_ISSUE_SKIP_COMPLETE;
 	else if (ret < 0)
 		req_set_fail(req);
+
+	if (ret >= 0 && req->flags & REQ_F_APOLL_MULTISHOT &&
+	    io_req_post_cqe(req, ret, IORING_CQE_F_MORE))
+		return IOU_ISSUE_SKIP_COMPLETE;
+
 	io_ring_submit_lock(ctx, issue_flags);
 	hlist_del_init(&req->hash_node);
 	io_ring_submit_unlock(ctx, issue_flags);
 	io_req_set_res(req, ret, 0);
+
+	if (issue_flags & IO_URING_F_MULTISHOT)
+		return IOU_STOP_MULTISHOT;
 	return IOU_OK;
 }