io_uring/sqpoll: use the correct check for pending task_work

Message ID 36d7ef98-38d9-4f45-8bac-f0032e0df695@kernel.dk (mailing list archive)

Commit Message

Jens Axboe Feb. 14, 2024, 8:59 p.m. UTC
A previous commit moved to using just the private task_work list for
SQPOLL, but it neglected to update the check for whether we have
pending task_work. Normally this is fine as we'll attempt to run it
unconditionally, but if we race with going to sleep AND task_work
being added, then we certainly need the right check here.
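
The failure mode is the classic lost-wakeup shape: the condition checked
before sleeping must cover the same queue the wakeup is issued for. Below
is a minimal userspace sketch of that pattern, not io_uring code; it uses
plain pthreads, and every name in it (private_work, generic_work, producer)
is illustrative only. The buggy gate tests the wrong flag, so the thread
parks until timeout even though work is pending on the flag it actually
services.

	/* lost-work sketch: build with `gcc -pthread demo.c` */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
	static bool private_work;  /* stands in for retry_list / tctx->task_list */
	static bool generic_work;  /* stands in for task_work_pending(current)   */

	static void *producer(void *arg)
	{
		(void)arg;
		usleep(100 * 1000);
		pthread_mutex_lock(&lock);
		private_work = true;            /* queue work on the private list */
		pthread_cond_signal(&wake);     /* ...and wake the sleeper        */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t thr;
		struct timespec ts = { .tv_sec = time(NULL) + 2 };

		pthread_create(&thr, NULL, producer, NULL);
		pthread_mutex_lock(&lock);
		/*
		 * Buggy gate: tests generic_work, the analogue of
		 * task_work_pending(current). The wakeup arrives, the
		 * condition re-evaluates against the wrong flag, and we go
		 * back to sleep even though private_work is set. Gate on
		 * !private_work instead and the loop exits as soon as the
		 * producer signals.
		 */
		while (!generic_work) {
			if (pthread_cond_timedwait(&wake, &lock, &ts))
				break;  /* timed out: the wakeup was wasted */
		}
		pthread_mutex_unlock(&lock);
		printf("private work pending but unserviced: %s\n",
		       private_work && !generic_work ? "yes (the bug)" : "no");
		pthread_join(thr, NULL);
		return 0;
	}

Run as written, the sketch times out with private work still unserviced;
changing the while condition to !private_work makes it exit as soon as the
producer signals, which is exactly what switching task_work_pending() to
io_sq_tw_pending() accomplishes in the patch below.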

Fixes: af5d68f8892f ("io_uring/sqpoll: manage task_work privately")
Signed-off-by: Jens Axboe <axboe@kernel.dk>

---

Patch

diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index f3979cacda13..82672eaaee81 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -246,6 +246,13 @@ static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
 	return count;
 }
 
+static bool io_sq_tw_pending(struct llist_node *retry_list)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	return retry_list || !llist_empty(&tctx->task_list);
+}
+
 static int io_sq_thread(void *data)
 {
 	struct llist_node *retry_list = NULL;
@@ -301,7 +308,7 @@ static int io_sq_thread(void *data)
 		}
 
 		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
-		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
+		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
 			bool needs_sched = true;
 
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
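
Note that the new helper has to look at both sources: retry_list carries
entries that io_sq_tw() deferred after hitting its max_entries cap, while
tctx->task_list is where newly queued task_work lands. Gating the sleep on
only one of the two would reintroduce a variant of the same race, with the
thread parking while runnable work sits on the other list.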