[for-next,v2,04/13] io_uring: lock on remove in io_apoll_task_func

Message ID: 20221123110614.3297343-5-dylany@meta.com
State: New
Series: io_uring: batch multishot completions

Commit Message

Dylan Yudaken Nov. 23, 2022, 11:06 a.m. UTC
This allows using io_req_defer_failed rather than post in all cases. The
alternative would be to branch based on *locked and decide whether to post
or defer the completion. However, all of the non-error paths in
io_poll_check_events that do not return IOU_POLL_NO_ACTION end up locking
anyway, and locking here reduces the logic complexity, so it seems
reasonable to always lock and then also always defer the completion on
failure.
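
For illustration, the rejected alternative would have branched on *locked
at each completion site, roughly like the following (a hypothetical sketch
of the tail of io_apoll_task_func, not part of this patch):

	/* Hypothetical sketch of the rejected alternative: choose post vs
	 * defer per call site depending on whether the ring lock is held.
	 */
	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_complete_post(req);
	else if (ret == IOU_POLL_DONE)
		io_req_task_submit(req, locked);
	else if (*locked)
		io_req_defer_failed(req, ret);
	else
		io_req_post_failed(req, ret);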

This also means that only io_req_defer_failed needs to be exported from
io_uring.h.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/io_uring.c | 4 ++--
 io_uring/io_uring.h | 2 +-
 io_uring/poll.c     | 5 +++--
 3 files changed, 6 insertions(+), 5 deletions(-)

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1e23adb7b0c5..5a620001df2e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -874,14 +874,14 @@  static inline void io_req_prep_failed(struct io_kiocb *req, s32 res)
 		def->fail(req);
 }
 
-static void io_req_defer_failed(struct io_kiocb *req, s32 res)
+void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	__must_hold(&ctx->uring_lock)
 {
 	io_req_prep_failed(req, res);
 	io_req_complete_defer(req);
 }
 
-void io_req_post_failed(struct io_kiocb *req, s32 res)
+static void io_req_post_failed(struct io_kiocb *req, s32 res)
 {
 	io_req_prep_failed(req, res);
 	io_req_complete_post(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 4d2d0926a42b..ffab0d2d33c0 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -30,7 +30,7 @@  bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_post_failed(struct io_kiocb *req, s32 res);
+void io_req_defer_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
 void io_req_complete_post(struct io_kiocb *req);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
diff --git a/io_uring/poll.c b/io_uring/poll.c
index ceb8255b54eb..4bd43e6f5b72 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -308,15 +308,16 @@  static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret == IOU_POLL_NO_ACTION)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, locked);
 
 	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
-		io_req_complete_post(req);
+		io_req_task_complete(req, locked);
 	else if (ret == IOU_POLL_DONE)
 		io_req_task_submit(req, locked);
 	else
-		io_req_post_failed(req, ret);
+		io_req_defer_failed(req, ret);
 }
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)