
[for-next,12/12] io_uring: poll_add retarget_rsrc support

Message ID 20221031134126.82928-13-dylany@meta.com (mailing list archive)
State New
Series io_uring: retarget rsrc nodes periodically

Commit Message

Dylan Yudaken Oct. 31, 2022, 1:41 p.m. UTC
Add can_retarget_rsrc handler for poll.

Note that a copy of the fd is stashed in the middle of struct io_poll, as
there is a hole there; this is the only way to ensure that the structure
does not grow beyond the size of struct io_cmd_data.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
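For illustration of the note above: the constraint is that every per-opcode
struct aliased through io_kiocb_to_cmd() must fit inside struct io_cmd_data.
A minimal sketch of the resulting io_poll layout, with an assumed
compile-time check (the _sketch name and the assert are illustrative and not
part of the patch):

/*
 * Illustrative only: on 64-bit builds the new 'fd' sits in the 4-byte
 * hole left after the 4-byte 'events' field, so the struct does not grow.
 */
struct io_poll_sketch {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	int				fd;	/* fills the existing hole */
	struct wait_queue_entry		wait;
};
/* assumed check mirroring the size limit the commit message refers to */
static_assert(sizeof(struct io_poll_sketch) <= sizeof(struct io_cmd_data),
	      "per-opcode data must fit in struct io_cmd_data");
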
 io_uring/opdef.c |  1 +
 io_uring/poll.c  | 12 ++++++++++++
 io_uring/poll.h  |  2 ++
 3 files changed, 15 insertions(+)

Patch

diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 5159b3abc2b2..952ea8ff5032 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -133,6 +133,7 @@  const struct io_op_def io_op_defs[] = {
 		.name			= "POLL_ADD",
 		.prep			= io_poll_add_prep,
 		.issue			= io_poll_add,
+		.can_retarget_rsrc	= io_poll_can_retarget_rsrc,
 	},
 	[IORING_OP_POLL_REMOVE] = {
 		.audit_skip		= 1,
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 0d9f49c575e0..fde8060b9399 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -863,6 +863,7 @@  int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 
 	poll->events = io_poll_parse_events(sqe, flags);
+	poll->fd = req->cqe.fd;
 	return 0;
 }
 
@@ -963,3 +964,14 @@  void io_apoll_cache_free(struct io_cache_entry *entry)
 {
 	kfree(container_of(entry, struct async_poll, cache));
 }
+
+bool io_poll_can_retarget_rsrc(struct io_kiocb *req)
+{
+	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
+
+	if (req->flags & REQ_F_FIXED_FILE &&
+	    io_file_peek_fixed(req, poll->fd) != req->file)
+		return false;
+
+	return true;
+}
diff --git a/io_uring/poll.h b/io_uring/poll.h
index 5f3bae50fc81..dcc4b06bcea1 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -12,6 +12,7 @@  struct io_poll {
 	struct file			*file;
 	struct wait_queue_head		*head;
 	__poll_t			events;
+	int				fd; /* only used by poll_add */
 	struct wait_queue_entry		wait;
 };
 
@@ -37,3 +38,4 @@  bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			bool cancel_all);
 
 void io_apoll_cache_free(struct io_cache_entry *entry);
+bool io_poll_can_retarget_rsrc(struct io_kiocb *req);
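
For context, a hedged sketch of how a retarget pass might consult the new
handler through the io_op_defs table; io_req_can_retarget() is an
illustrative helper name, not taken from this series:

static bool io_req_can_retarget(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	/* assumed default when no handler is registered (illustrative) */
	if (!def->can_retarget_rsrc)
		return true;
	return def->can_retarget_rsrc(req);
}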