@@ -555,7 +555,8 @@ static void io_queue_iowq(struct io_kiocb *req)
* procedure rather than attempt to run this request (or create a new
* worker for it).
*/
- if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))
+ if (WARN_ON_ONCE(!same_thread_group(tctx->task, current) ||
+ percpu_ref_is_dying(&req->ctx->refs)))
atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
 
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
@@ -1246,7 +1247,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
return;
}
 
- if (likely(!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method)))
+ if (!percpu_ref_is_dying(&ctx->refs) &&
+ !task_work_add(tctx->task, &tctx->task_work, ctx->notify_method))
return;
 
io_fallback_tw(tctx, false);
For queueing work to io-wq or adding normal task_work, io_uring will
cancel the work items if the task is going away. If the ring is starting
to go through teardown, the ref is marked as dying. Use that as well for
the fallback/cancel mechanism.

For deferred task_work, this is done out-of-line as part of the exit work
handling. Hence it doesn't need any extra checks in the hot path.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
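
To make the teardown check concrete, here is a minimal userspace C sketch
of the pattern both hunks apply. It is illustrative only: a single atomic
flag stands in for percpu_ref_is_dying(&ctx->refs), and every name in it
(ring_ctx, work_item, queue_iowq, WORK_CANCEL, and so on) is a hypothetical
stand-in rather than io_uring's actual types. The real io-wq check also
covers the cross-thread-group case, and the real task_work path
additionally falls back when task_work_add() itself fails; both are
elided here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in: the kernel checks percpu_ref_is_dying(&ctx->refs);
 * here a single atomic flag models that dying state. */
struct ring_ctx {
	atomic_bool refs_dying;		/* set once teardown begins */
};

struct work_item {
	unsigned int flags;
};

#define WORK_CANCEL	(1u << 0)	/* stands in for IO_WQ_WORK_CANCEL */

/* Analogous to marking ctx->refs dying when ring teardown starts. */
static void ctx_begin_teardown(struct ring_ctx *ctx)
{
	atomic_store(&ctx->refs_dying, true);
}

/* First hunk's pattern: work headed for the worker pool is flagged for
 * cancellation rather than run once the ctx is dying. */
static void queue_iowq(struct ring_ctx *ctx, struct work_item *w)
{
	if (atomic_load(&ctx->refs_dying))
		w->flags |= WORK_CANCEL;
	printf("io-wq: flags=0x%x\n", w->flags);
}

/* Second hunk's pattern: skip the normal task_work path entirely when
 * the ctx is dying and take the fallback instead. */
static void fallback_tw(void)
{
	printf("task_work: fallback path\n");
}

static void queue_task_work(struct ring_ctx *ctx)
{
	if (!atomic_load(&ctx->refs_dying)) {
		printf("task_work: queued normally\n");
		return;
	}
	fallback_tw();
}

int main(void)
{
	struct ring_ctx ctx = { .refs_dying = false };
	struct work_item w = { .flags = 0 };

	queue_iowq(&ctx, &w);		/* runs normally */
	queue_task_work(&ctx);		/* queued normally */

	ctx_begin_teardown(&ctx);

	queue_iowq(&ctx, &w);		/* now marked WORK_CANCEL */
	queue_task_work(&ctx);		/* now takes the fallback */
	return 0;
}

The design point the patch leans on is that the dying check is a single
cheap read in paths that already take a slow turn on failure, while
deferred task_work avoids even that by handling cancellation out-of-line
in the exit work.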