@@ -1095,6 +1095,20 @@ void tctx_task_work(struct callback_head *cb)
trace_io_uring_task_work_run(tctx, count, loops);
}

+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+ struct io_kiocb *req;
+
+ while (node) {
+ req = container_of(node, struct io_kiocb, io_task_work.node);
+ node = node->next;
+ if (llist_add(&req->io_task_work.node,
+ &req->ctx->fallback_llist))
+ schedule_delayed_work(&req->ctx->fallback_work, 1);
+ }
+}
+
static void io_req_local_work_add(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1121,7 +1135,6 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
{
struct io_uring_task *tctx = req->task->io_uring;
struct io_ring_ctx *ctx = req->ctx;
- struct llist_node *node;

if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
io_req_local_work_add(req);
@@ -1138,15 +1151,7 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
return;

- node = llist_del_all(&tctx->task_list);
-
- while (node) {
- req = container_of(node, struct io_kiocb, io_task_work.node);
- node = node->next;
- if (llist_add(&req->io_task_work.node,
- &req->ctx->fallback_llist))
- schedule_delayed_work(&req->ctx->fallback_work, 1);
- }
+ io_fallback_tw(tctx);
}

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)

When the target process is dying and so task_work_add() is not allowed
we push all task_work items to the fallback workqueue. Move the part
responsible for moving tw items out of __io_req_task_work_add() into a
separate function. Makes it a bit cleaner and gives the compiler a bit
of extra info.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
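To make the handoff pattern concrete outside the kernel tree, below is a
minimal, self-contained userspace sketch of what io_fallback_tw() does:
detach the whole per-task list in one atomic swap, walk it, and push each
request onto the per-context fallback list, scheduling the worker only when
that list transitions from empty to non-empty (llist_add() returns true
exactly in that case). The struct lnode/struct llist types and the
list_del_all()/list_add() helpers here are illustrative stand-ins for the
kernel's llist API, not actual io_uring code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified lock-free singly linked list, modelled on the kernel's llist. */
struct lnode {
	struct lnode *next;
};

struct llist {
	_Atomic(struct lnode *) first;
};

/* Stand-in for struct io_kiocb with its io_task_work.node member. */
struct request {
	int id;
	struct lnode tw_node;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Detach every queued node in a single atomic swap (cf. llist_del_all()). */
static struct lnode *list_del_all(struct llist *l)
{
	return atomic_exchange(&l->first, NULL);
}

/*
 * Push one node; returns true when the list was previously empty, which
 * is the caller's signal to kick the worker (cf. llist_add()).
 */
static bool list_add(struct lnode *n, struct llist *l)
{
	struct lnode *old = atomic_load(&l->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&l->first, &old, n));
	return old == NULL;
}

/* The io_fallback_tw() pattern: drain one list, requeue onto another. */
static void fallback_tw(struct llist *task_list, struct llist *fallback)
{
	struct lnode *node = list_del_all(task_list);

	while (node) {
		struct request *req = container_of(node, struct request, tw_node);

		/* Read ->next before the node is linked into the other list. */
		node = node->next;
		if (list_add(&req->tw_node, fallback))
			printf("first entry: schedule fallback worker\n");
	}
}

int main(void)
{
	struct llist task_list = { NULL }, fallback = { NULL };
	struct request a = { .id = 1 }, b = { .id = 2 };

	list_add(&a.tw_node, &task_list);
	list_add(&b.tw_node, &task_list);
	fallback_tw(&task_list, &fallback);

	for (struct lnode *n = atomic_load(&fallback.first); n; n = n->next)
		printf("fallback holds req %d\n",
		       container_of(n, struct request, tw_node)->id);
	return 0;
}

The one subtle point the sketch preserves from the patch is reading
node->next before re-linking the node, since adding it to the fallback
list overwrites that pointer.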