[for-next,04/10] io_uring: lock on remove in io_apoll_task_func

Message ID: 20221121100353.371865-5-dylany@meta.com
State: New
Series: io_uring: batch multishot completions

Commit Message

Dylan Yudaken Nov. 21, 2022, 10:03 a.m. UTC
This allows using io_req_defer_failed rather than post in all cases. The
alternative would be to branch based on *locked and decide whether to post
or defer the completion. However, all of the non-error paths in
io_poll_check_events that do not return IOU_POLL_NO_ACTION end up locking
anyway, and locking here does reduce the logic complexity, so it seems
reasonable to always lock and then also always defer the completion on
failure.

This also means that only io_req_defer_failed needs exporting from
io_uring.h.
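
For illustration, the rejected alternative would have branched on whether
the task-work callback already holds the uring_lock, roughly like the
hypothetical sketch below (not part of the patch; per the hunks that
follow, io_req_defer_failed requires the lock and io_req_post_failed does
not):

	if (*locked)
		/* uring_lock held: queue onto the deferred completion list */
		io_req_defer_failed(req, ret);
	else
		/* no lock: complete and post the CQE immediately */
		io_req_post_failed(req, ret);

With the unconditional io_tw_lock() taken instead, the deferred path is
usable in every case and io_req_post_failed can become static.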

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/io_uring.c | 4 ++--
 io_uring/io_uring.h | 2 +-
 io_uring/poll.c     | 5 +++--
 3 files changed, 6 insertions(+), 5 deletions(-)

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d9bd18e3a603..03946f46dadc 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -878,14 +878,14 @@  static inline void io_req_prep_failed(struct io_kiocb *req, s32 res)
 		def->fail(req);
 }
 
-static void io_req_defer_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	__must_hold(&ctx->uring_lock)
 {
 	io_req_prep_failed(req, res);
 	io_req_complete_defer(req);
 }
 
-void io_req_post_failed(struct io_kiocb *req, s32 res)
+static void io_req_post_failed(struct io_kiocb *req, s32 res)
 {
 	io_req_prep_failed(req, res);
 	io_req_complete_post(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ee3139947fcc..1daf236513cc 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -29,7 +29,7 @@  bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_post_failed(struct io_kiocb *req, s32 res);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
 void io_req_complete_post(struct io_kiocb *req);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index e0a4faa010b3..2b77d18a67a7 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -296,15 +296,16 @@  static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret == IOU_POLL_NO_ACTION)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, locked);
 
 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_post_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
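
For reference, io_apoll_task_func as it reads with this patch applied; the
lines above the visible hunk context (the io_poll_check_events call that
produces ret) are assumed unchanged from the pre-patch function:

	static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
	{
		int ret = io_poll_check_events(req, locked);

		if (ret == IOU_POLL_NO_ACTION)
			return;

		/* lock unconditionally so every path below may defer */
		io_tw_lock(req->ctx, locked);
		io_poll_remove_entries(req);
		io_poll_tw_hash_eject(req, locked);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, locked);
		else if (ret == IOU_POLL_DONE)
			io_req_task_submit(req, locked);
		else
			io_req_defer_failed(req, ret);
	}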