[v2,2/8] io_uring: optimise local tw add ctx pinning

Message ID cbdfcb6b232627f30e9e50ef91f13c4f05910247.1680782017.git.asml.silence@gmail.com (mailing list archive)
State New
Series: optimise rescheduling due to deferred tw

Commit Message

Pavel Begunkov April 6, 2023, 1:20 p.m. UTC
We currently pin the ctx for io_req_local_work_add() with
percpu_ref_get/put, which implies two rcu_read_lock/unlock pairs and
some extra overhead on top of that in the fast path. Replace the
percpu_ref pinning with a plain RCU read section and let
io_ring_exit_work() synchronise against it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 29a0516ee5ce..fb7215b543cd 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1332,9 +1332,9 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
-		percpu_ref_get(&ctx->refs);
+		rcu_read_lock();
 		io_req_local_work_add(req);
-		percpu_ref_put(&ctx->refs);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -3052,6 +3052,10 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 	spin_lock(&ctx->completion_lock);
 	spin_unlock(&ctx->completion_lock);
 
+	/* pairs with RCU read section in io_req_local_work_add() */
+	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+		synchronize_rcu();
+
 	io_ring_ctx_free(ctx);
 }
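
For readers less familiar with this pattern, below is a minimal,
self-contained userspace sketch of the same idea, built on liburcu
rather than the kernel's RCU. All names here (my_ctx,
ctx_local_work_add, ctx_exit_work) are hypothetical stand-ins for
illustration, not io_uring APIs, and the dying flag is a
simplification of the real unreachability guarantees the kernel
relies on.

	/*
	 * Build (assumes liburcu is installed): cc rcu_pin_demo.c -lurcu
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <urcu.h>	/* userspace RCU: rcu_read_lock() and friends */

	struct my_ctx {
		bool dying;	/* stand-in for "exit work has started" */
		int pending;	/* stand-in for the deferred task_work list */
	};

	/* Fast path: an RCU read section instead of percpu_ref get/put. */
	static void ctx_local_work_add(struct my_ctx *ctx)
	{
		rcu_read_lock();
		if (!ctx->dying)
			ctx->pending++;	/* the real code queues task_work here */
		rcu_read_unlock();
	}

	/*
	 * Exit path: once the ctx can no longer be reached by new
	 * submitters, a single synchronize_rcu() waits out any reader
	 * still inside the section above; only then is it safe to free
	 * the ctx. This pairs with the rcu_read_lock()/unlock() in
	 * ctx_local_work_add().
	 */
	static void ctx_exit_work(struct my_ctx *ctx)
	{
		ctx->dying = true;	/* simplified: the kernel relies on the
					 * ctx being unreachable, not a flag */
		synchronize_rcu();
		free(ctx);
	}

	int main(void)
	{
		struct my_ctx *ctx = calloc(1, sizeof(*ctx));

		if (!ctx)
			return 1;
		rcu_register_thread();	/* every thread using RCU must register */
		ctx_local_work_add(ctx);
		printf("pending items: %d\n", ctx->pending);
		ctx_exit_work(ctx);
		rcu_unregister_thread();
		return 0;
	}

Note that the exit-side synchronize_rcu() in the patch is gated on
IORING_SETUP_DEFER_TASKRUN: only rings with deferred task_work take
the RCU fast path in __io_req_task_work_add(), so all other rings
avoid paying for a grace-period wait at teardown.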