
[2/5] io_uring: defer iowq cqe overflow via task_work

Message ID 9046410ac27e18f2baa6f7cdb363ec921cbc3b79.1742829388.git.asml.silence@gmail.com
State New
Series reissue fix and various cleanups

Commit Message

Pavel Begunkov March 24, 2025, 3:32 p.m. UTC
Don't handle CQE overflows in io_req_complete_post(); defer them to
flush_completions instead. That cuts some duplication, and I also want
to limit the number of places directly overflowing completions.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
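
For reference, io_req_complete_post() after this patch reads roughly as
below. This is a sketch reconstructed from the hunks that follow, with
the long locking comment and the trailing io-wq reference drop omitted:

static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool completed = true;

	/* io-wq (lockless_cq) and reissue punt straight to task_work */
	if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) {
defer_complete:
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
		return;
	}

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP))
		completed = io_fill_cqe_req(ctx, req);
	io_cq_unlock_post(ctx);

	/* CQ was full: take the task_work path instead of overflowing here */
	if (!completed)
		goto defer_complete;
}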

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index e6c462948273..1fcfe62cecd9 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -892,6 +892,7 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	bool completed = true;
 
 	/*
 	 * All execution paths but io-wq use the deferred completions by
@@ -905,18 +906,20 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	 * the submitter task context, IOPOLL protects with uring_lock.
 	 */
 	if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) {
+defer_complete:
 		req->io_task_work.func = io_req_task_complete;
 		io_req_task_work_add(req);
 		return;
 	}
 
 	io_cq_lock(ctx);
-	if (!(req->flags & REQ_F_CQE_SKIP)) {
-		if (!io_fill_cqe_req(ctx, req))
-			io_req_cqe_overflow(req);
-	}
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		completed = io_fill_cqe_req(ctx, req);
 	io_cq_unlock_post(ctx);
 
+	if (!completed)
+		goto defer_complete;
+
 	/*
 	 * We don't free the request here because we know it's called from
 	 * io-wq only, which holds a reference, so it cannot be the last put.
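
The effect is that io_req_complete_post() no longer calls
io_req_cqe_overflow() directly: when io_fill_cqe_req() fails because the
CQ is full, the request is re-queued as task_work, and the overflow is
dealt with when the deferred completions are flushed. A minimal sketch
of what the flush side then does per request (the helper name is made up
for illustration; this is not the verbatim flush loop):

static void flush_one_completion(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	/* runs under io_cq_lock() from the completion flush */
	if (!(req->flags & REQ_F_CQE_SKIP) && !io_fill_cqe_req(ctx, req))
		io_req_cqe_overflow(req);	/* overflow handled here */
}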