@@ -1044,20 +1044,20 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
/*
* Run queued task_work, returning the number of entries processed in *count.
* If more entries than max_entries are available, stop processing once this
- * is reached and return the rest of the list.
+ * is reached.
*/
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
- unsigned int *count,
- unsigned int max_entries)
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+ unsigned int max_entries)
{
struct io_ring_ctx *ctx = NULL;
struct io_tw_state ts = { };
do {
- struct io_wq_work_node *next = node->next;
+ struct io_wq_work_node *node = list->first;
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
+ list->first = node->next;
if (req->ctx != ctx) {
ctx_flush_and_put(ctx, &ts);
ctx = req->ctx;
@@ -1067,17 +1067,15 @@ struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
INDIRECT_CALL_2(req->io_task_work.func,
io_poll_task_func, io_req_rw_complete,
req, &ts);
- node = next;
(*count)++;
if (unlikely(need_resched())) {
ctx_flush_and_put(ctx, &ts);
ctx = NULL;
cond_resched();
}
- } while (node && *count < max_entries);
+ } while (list->first && *count < max_entries);
ctx_flush_and_put(ctx, &ts);
- return node;
}
static __cold void __io_fallback_schedule(struct io_ring_ctx *ctx,
@@ -1137,41 +1135,39 @@ static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
__io_fallback_tw(&tctx->task_list, &tctx->task_lock, sync);
}
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
- unsigned int max_entries,
- unsigned int *count)
+void __tctx_task_work_run(struct io_uring_task *tctx,
+ struct io_wq_work_list *list,
+ unsigned int max_entries, unsigned int *count)
{
- struct io_wq_work_node *node;
-
if (unlikely(current->flags & PF_EXITING)) {
io_fallback_tw(tctx, true);
- return NULL;
+ return;
}
if (!READ_ONCE(tctx->task_list.first))
- return NULL;
+ return;
spin_lock_irq(&tctx->task_lock);
- node = tctx->task_list.first;
+ *list = tctx->task_list;
INIT_WQ_LIST(&tctx->task_list);
spin_unlock_irq(&tctx->task_lock);
- if (node)
- node = io_handle_tw_list(node, count, max_entries);
+ if (!wq_list_empty(list))
+ io_handle_tw_list(list, count, max_entries);
/* relaxed read is enough as only the task itself sets ->in_cancel */
if (unlikely(atomic_read(&tctx->in_cancel)))
io_uring_drop_tctx_refs(current);
trace_io_uring_task_work_run(tctx, *count);
- return node;
}
unsigned int tctx_task_work_run(struct io_uring_task *tctx)
{
+ struct io_wq_work_list list;
unsigned int count = 0;
- __tctx_task_work_run(tctx, UINT_MAX, &count);
+ __tctx_task_work_run(tctx, &list, UINT_MAX, &count);
return count;
}
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -91,10 +91,11 @@ void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
- unsigned int *count, unsigned int max_entries);
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
- unsigned int max_entries, unsigned int *count);
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+ unsigned int max_entries);
+void __tctx_task_work_run(struct io_uring_task *tctx,
+ struct io_wq_work_list *list, unsigned int max_entries,
+ unsigned int *count);
unsigned int tctx_task_work_run(struct io_uring_task *tctx);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -221,29 +221,29 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
* than we were asked to process. Newly queued task_work isn't run until the
* retry list has been fully processed.
*/
-static unsigned int io_sq_tw(struct io_wq_work_node **retry_list, int max_entries)
+static unsigned int io_sq_tw(struct io_wq_work_list *retry_list, int max_entries)
{
struct io_uring_task *tctx = current->io_uring;
unsigned int count = 0;
- if (*retry_list) {
- *retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+ if (!wq_list_empty(retry_list)) {
+ io_handle_tw_list(retry_list, &count, max_entries);
if (count >= max_entries)
goto out;
max_entries -= count;
}
- *retry_list = __tctx_task_work_run(tctx, max_entries, &count);
+ __tctx_task_work_run(tctx, retry_list, max_entries, &count);
out:
if (task_work_pending(current))
task_work_run();
return count;
}
-static bool io_sq_tw_pending(struct io_wq_work_node *retry_list)
+static bool io_sq_tw_pending(struct io_wq_work_list *retry_list)
{
struct io_uring_task *tctx = current->io_uring;
- return retry_list || READ_ONCE(tctx->task_list.first);
+ return !wq_list_empty(retry_list) || !wq_list_empty(&tctx->task_list);
}
static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
@@ -259,7 +259,7 @@ static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
static int io_sq_thread(void *data)
{
- struct io_wq_work_node *retry_list = NULL;
+ struct io_wq_work_list retry_list;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
struct rusage start;
@@ -292,6 +292,7 @@ static int io_sq_thread(void *data)
audit_uring_entry(IORING_OP_NOP);
audit_uring_exit(true, 0);
+ INIT_WQ_LIST(&retry_list);
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
@@ -332,7 +333,8 @@ static int io_sq_thread(void *data)
}
prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
- if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
+ if (!io_sqd_events_pending(sqd) &&
+ !io_sq_tw_pending(&retry_list)) {
bool needs_sched = true;
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -371,7 +373,7 @@ static int io_sq_thread(void *data)
timeout = jiffies + sqd->sq_thread_idle;
}
- if (retry_list)
+ if (!wq_list_empty(&retry_list))
io_sq_tw(&retry_list, UINT_MAX);
io_uring_cancel_generic(true, sqd);
The normal task_work logic doesn't really need it, as it always runs all of
the pending work. But for SQPOLL, it can now pass in its retry_list, which
simplifies the tracking of split-up task_work runs. This avoids passing
io_wq_work_node pointers around.

Rather than pass in a list, SQPOLL could re-add the leftover items to the
generic task_work list. But that requires re-locking the task_lock and using
task_list for that, whereas having a separate retry list allows for skipping
those steps. The downside is that two lists now need checking, but that's how
it was before as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 36 ++++++++++++++++--------------------
 io_uring/io_uring.h |  9 +++++----
 io_uring/sqpoll.c   | 20 +++++++++++---------
 3 files changed, 32 insertions(+), 33 deletions(-)
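
As a rough illustration of the pattern (not part of the patch): the snippet
below is a standalone userspace sketch of the splice-and-consume hand-off that
__tctx_task_work_run() and io_handle_tw_list() now use. The work_node,
work_list and handle_list names are simplified stand-ins rather than the real
io_uring types, and the locking is omitted; it only shows how a capped run
leaves leftover entries on a caller-owned list, the way SQPOLL keeps its
retry_list.

/*
 * Minimal userspace sketch (not kernel code): pending work is spliced into
 * a caller-owned list, run up to a cap, and the remainder stays on that
 * list for a later retry. All names here are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct work_node {
	struct work_node *next;
	int id;
};

struct work_list {
	struct work_node *first;
	struct work_node *last;
};

static int list_empty(const struct work_list *l)
{
	return l->first == NULL;
}

/* Pop and "run" up to max_entries items, leaving leftovers on the list. */
static void handle_list(struct work_list *list, unsigned int *count,
			unsigned int max_entries)
{
	do {
		struct work_node *node = list->first;

		list->first = node->next;
		printf("running work item %d\n", node->id);
		(*count)++;
	} while (list->first && *count < max_entries);
}

int main(void)
{
	struct work_node nodes[4] = {
		{ &nodes[1], 1 }, { &nodes[2], 2 }, { &nodes[3], 3 }, { NULL, 4 },
	};
	struct work_list pending = { .first = &nodes[0], .last = &nodes[3] };
	struct work_list retry = { NULL, NULL };
	unsigned int count = 0;

	/* Splice pending work into a private list (done under a lock in the kernel). */
	retry = pending;
	pending.first = pending.last = NULL;

	/* A capped run consumes two items; the rest stays on the retry list. */
	if (!list_empty(&retry))
		handle_list(&retry, &count, 2);

	printf("ran %u item(s), retry list %s\n", count,
	       list_empty(&retry) ? "empty" : "still has work");
	return 0;
}

Compiling and running this prints that two items ran and that the retry list
still has work queued, which mirrors the capped-run behaviour io_sq_tw() now
gets from the shared retry_list.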