[4/5] io_uring: consider ring dead once the ref is marked dying

Message ID: 20240604191314.454554-5-axboe@kernel.dk (mailing list archive)
State: New
Series: Wait on cancelations at release time

Commit Message

Jens Axboe June 4, 2024, 7:13 p.m. UTC
Don't gate this on the task exiting flag; keying off the task's
PF_EXITING flag is generally not a good idea anyway. Once the ring
starts going through teardown, its ref is marked as dying. Use that as
the fallback/cancel mechanism instead.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 9 +++++++--
 io_uring/io_uring.h | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)
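
For context, io_ring_ref_is_dying() lives in io_uring/refs.h (newly included below) rather than in this diff. A minimal sketch of the helper, assuming the ring refs checked here are the ctx percpu refs that teardown kills:

static inline bool io_ring_ref_is_dying(struct io_ring_ctx *ctx)
{
	/* true once percpu_ref_kill() has been called on the ring refs */
	return percpu_ref_is_dying(&ctx->refs);
}

With a helper of that shape, both io_queue_iowq() and io_req_normal_work_add() can key off ring state instead of the submitting task's PF_EXITING flag.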

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 841a5dd6ba89..5a4699170136 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -528,7 +528,11 @@ static void io_queue_iowq(struct io_kiocb *req)
 	 * procedure rather than attempt to run this request (or create a new
 	 * worker for it).
 	 */
-	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+	WARN_ON_ONCE(!io_ring_ref_is_dying(req->ctx) &&
+		     !same_thread_group(req->task, current));
+
+	if (!same_thread_group(req->task, current) ||
+	    io_ring_ref_is_dying(req->ctx))
 		req->work.flags |= IO_WQ_WORK_CANCEL;
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
@@ -1196,7 +1200,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 		return;
 	}
 
-	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
+	if (!io_ring_ref_is_dying(ctx) &&
+	    !task_work_add(req->task, &tctx->task_work, ctx->notify_method))
 		return;
 
 	io_fallback_tw(tctx, false);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index cd43924eed04..55eac07d5fe0 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -11,6 +11,7 @@
 #include "io-wq.h"
 #include "slist.h"
 #include "filetable.h"
+#include "refs.h"
 
 #ifndef CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -122,7 +123,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
 		 * Not from an SQE, as those cannot be submitted, but via
 		 * updating tagged resources.
 		 */
-		if (ctx->submitter_task->flags & PF_EXITING)
+		if (io_ring_ref_is_dying(ctx))
 			lockdep_assert(current_work());
 		else
 			lockdep_assert(current == ctx->submitter_task);
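
The teardown side this relies on: at release time the ring ref is marked dying before cancelations are reaped. A sketch of that sequence, assuming the mainline io_ring_ctx_wait_and_kill() path (the series may rework the exact flow):

static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	/*
	 * From here on io_ring_ref_is_dying(ctx) reads true: new task_work
	 * is routed to the fallback path, and io-wq queueing marks requests
	 * with IO_WQ_WORK_CANCEL.
	 */
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);
	/* remainder of teardown (cancelations, ctx free) elided */
}

This is also what lets the lockdep assert above key off the ring rather than PF_EXITING: once the ref is dying, CQ completions should only be posted via fallback work.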