--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1161,9 +1161,8 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
}
}

-int io_run_local_work(struct io_ring_ctx *ctx)
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
{
- bool locked;
struct llist_node *node;
struct llist_node fake;
struct llist_node *current_final = NULL;
@@ -1178,8 +1177,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
return -EEXIST;
}

- locked = mutex_trylock(&ctx->uring_lock);
-
node = io_llist_xchg(&ctx->work_llist, &fake);
ret = 0;
again:
@@ -1204,12 +1201,24 @@ int io_run_local_work(struct io_ring_ctx *ctx)
goto again;
}

- if (locked) {
+ if (locked)
io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- }
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
+
+}
+
+int io_run_local_work(struct io_ring_ctx *ctx)
+{
+ bool locked;
+ int ret;
+
+ locked = mutex_trylock(&ctx->uring_lock);
+ ret = __io_run_local_work(ctx, locked);
+ if (locked)
+ mutex_unlock(&ctx->uring_lock);
+
+ return ret;
}

static void io_req_tw_post(struct io_kiocb *req, bool *locked)
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -27,6 +27,7 @@ enum {
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
We have a few spots that drop the mutex just to run local task_work,
which immediately tries to grab it again.

Add a helper that just passes in whether we're locked already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 23 ++++++++++++++-------
 io_uring/io_uring.h |  1 +
 2 files changed, 17 insertions(+), 7 deletions(-)
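
For illustration only (this sketch and the caller name are not part of the
patch): a path that already holds ctx->uring_lock can now call
__io_run_local_work(ctx, true) directly, instead of dropping the mutex just
so io_run_local_work() can mutex_trylock() it again:

/*
 * Hypothetical caller, not from this patch: run pending local task_work
 * while ctx->uring_lock is already held, using the new helper rather
 * than an unlock/relock dance around io_run_local_work().
 */
static int example_run_local_work_locked(struct io_ring_ctx *ctx)
{
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	/*
	 * Old pattern:
	 *	mutex_unlock(&ctx->uring_lock);
	 *	ret = io_run_local_work(ctx);
	 *	mutex_lock(&ctx->uring_lock);
	 */

	/* New pattern: tell the helper the lock is already held. */
	ret = __io_run_local_work(ctx, true);

	return ret;
}

Note that __io_run_local_work() keys io_submit_flush_completions() off the
same locked argument, so a locked caller also gets its completions flushed
before the helper returns.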