@@ -133,6 +133,7 @@ const struct io_op_def io_op_defs[] = {
.name = "POLL_ADD",
.prep = io_poll_add_prep,
.issue = io_poll_add,
+ .can_retarget_rsrc = io_poll_can_retarget_rsrc,
},
[IORING_OP_POLL_REMOVE] = {
.audit_skip = 1,
@@ -863,6 +863,7 @@ int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
poll->events = io_poll_parse_events(sqe, flags);
+ poll->fd = req->cqe.fd;
return 0;
}
@@ -963,3 +964,19 @@ void io_apoll_cache_free(struct io_cache_entry *entry)
{
kfree(container_of(entry, struct async_poll, cache));
}
+
+/*
+ * Decide whether this poll request's rsrc node may be retargeted.
+ * If the request uses a fixed file (REQ_F_FIXED_FILE), only allow
+ * retargeting while the file currently in its fixed slot (poll->fd,
+ * stashed at prep time) is still the file the request was armed with;
+ * a mismatch means the slot has been re-registered since arming.
+ */
+bool io_poll_can_retarget_rsrc(struct io_kiocb *req)
+{
+	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
+
+	if (req->flags & REQ_F_FIXED_FILE &&
+	    io_file_peek_fixed(req, poll->fd) != req->file)
+		return false;
+
+	return true;
+}
@@ -12,6 +12,7 @@ struct io_poll {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
+ int fd; /* only used by poll_add */
struct wait_queue_entry wait;
};
@@ -37,3 +38,4 @@ bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all);
void io_apoll_cache_free(struct io_cache_entry *entry);
+bool io_poll_can_retarget_rsrc(struct io_kiocb *req);
Add can_retarget_rsrc handler for poll.

Note that the copy of fd is stashed in the middle of struct io_poll, as
there is a hole there, and this is the only way to ensure that the
structure does not grow beyond the size of struct io_cmd_data.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/opdef.c |  1 +
 io_uring/poll.c  | 12 ++++++++++++
 io_uring/poll.h  |  2 ++
 3 files changed, 15 insertions(+)