
[next,v1,2/2] io_uring: limit local tw done

Message ID 20241120221452.3762588-3-dw@davidwei.uk (mailing list archive)
State New
Series: limit local tw done

Commit Message

David Wei Nov. 20, 2024, 10:14 p.m. UTC
Instead of eagerly running all available local tw, limit the amount of
local tw done to the max of IO_LOCAL_TW_DEFAULT_MAX (20) or wait_nr. The
value of 20 is chosen as a reasonable heuristic to allow enough work
batching but also keep latency down.

Add a retry_llist that maintains a list of local tw that couldn't be
done in time. No synchronisation is needed since it is only modified
within the task context.

Signed-off-by: David Wei <dw@davidwei.uk>
---
 include/linux/io_uring_types.h |  1 +
 io_uring/io_uring.c            | 43 +++++++++++++++++++++++++---------
 io_uring/io_uring.h            |  2 +-
 3 files changed, 34 insertions(+), 12 deletions(-)
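
In outline, the change works as follows (a paraphrase of the diff below,
not patch code; run_up_to() stands in for the new
__io_run_local_work_loop()):

	/* budget per invocation: at least 20, or wait_nr if larger */
	limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);

	/* leftovers parked by the previous invocation run first */
	remaining = run_up_to(&ctx->retry_llist.first, limit);
	if (ctx->retry_llist.first)
		goto retry_done;		/* budget already exhausted */

	/* then freshly queued work, with whatever budget is left */
	node = llist_reverse_order(llist_del_all(&ctx->work_llist));
	remaining = run_up_to(&node, remaining);
	ctx->retry_llist.first = node;		/* park anything over budget */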

Comments

Pavel Begunkov Nov. 20, 2024, 11:56 p.m. UTC | #1
On 11/20/24 22:14, David Wei wrote:
> Instead of eagerly running all available local tw, limit the amount of
> local tw done to the max of IO_LOCAL_TW_DEFAULT_MAX (20) or wait_nr. The
> value of 20 is chosen as a reasonable heuristic to allow enough work
> batching but also keep latency down.
> 
> Add a retry_llist that maintains a list of local tw that couldn't be
> done in time. No synchronisation is needed since it is only modified
> within the task context.
> 
> Signed-off-by: David Wei <dw@davidwei.uk>
> ---
>   include/linux/io_uring_types.h |  1 +
>   io_uring/io_uring.c            | 43 +++++++++++++++++++++++++---------
>   io_uring/io_uring.h            |  2 +-
>   3 files changed, 34 insertions(+), 12 deletions(-)
> 
> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index 593c10a02144..011860ade268 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -336,6 +336,7 @@ struct io_ring_ctx {
>   	 */
>   	struct {
>   		struct llist_head	work_llist;
> +		struct llist_head	retry_llist;

Fwiw, probably doesn't matter, but it doesn't even need
to be atomic, it's queued and spliced while holding
->uring_lock, the pending check is also synchronised as
there is only one possible task doing that.
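
A minimal sketch of what that would allow, assuming the retry list really
is only produced and consumed by the submitter task under ->uring_lock
(the plain-pointer field and the consume loop are illustrative, not the
patch; ts is the io_tw_state passed through __io_run_local_work()):

	struct llist_node *retry_list;	/* plain pointer instead of llist_head */

	while (ctx->retry_list) {	/* single consumer, no atomics needed */
		struct llist_node *next = ctx->retry_list->next;
		struct io_kiocb *req = container_of(ctx->retry_list,
						    struct io_kiocb,
						    io_task_work.node);

		req->io_task_work.func(req, ts);  /* stand-in for INDIRECT_CALL_2() */
		ctx->retry_list = next;
	}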

>   		unsigned long		check_cq;
>   		atomic_t		cq_wait_nr;
>   		atomic_t		cq_timeouts;
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 83bf041d2648..c3a7d0197636 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -121,6 +121,7 @@
...
>   static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
>   			       int min_events)
>   {
>   	struct llist_node *node;
>   	unsigned int loops = 0;
> -	int ret = 0;
> +	int ret, limit;
>   
>   	if (WARN_ON_ONCE(ctx->submitter_task != current))
>   		return -EEXIST;
>   	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
>   		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
> +	limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);
>   again:
> +	ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, limit);
> +	if (ctx->retry_llist.first)
> +		goto retry_done;
> +
>   	/*
>   	 * llists are in reverse order, flip it back the right way before
>   	 * running the pending items.
>   	 */
>   	node = llist_reverse_order(llist_del_all(&ctx->work_llist));
> -	while (node) {
> -		struct llist_node *next = node->next;
> -		struct io_kiocb *req = container_of(node, struct io_kiocb,
> -						    io_task_work.node);
> -		INDIRECT_CALL_2(req->io_task_work.func,
> -				io_poll_task_func, io_req_rw_complete,
> -				req, ts);
> -		ret++;
> -		node = next;
> -	}
> +	ret = __io_run_local_work_loop(&node, ts, ret);

One thing that is not so nice is that now we have this handling and
checks in the hot path, and __io_run_local_work_loop() most likely
gets uninlined.

I wonder, can we just requeue it via task_work again? We can even
add a variant efficiently adding a list instead of a single entry,
i.e. local_task_work_add(head, tail, ...);
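
A rough sketch of such a helper, reusing the existing llist_add_batch()
(the name local_task_work_add() and the wakeup handling are assumptions,
not actual io_uring code; the leftover chain would also have to be put
back into work_llist's LIFO order first):

	static void local_task_work_add(struct io_ring_ctx *ctx,
					struct llist_node *head,
					struct llist_node *tail)
	{
		/* one atomic splice instead of re-adding entries one by one */
		if (llist_add_batch(head, tail, &ctx->work_llist)) {
			/* list was empty: a real version would need the
			 * cq_wait_nr/wakeup handling of io_req_local_work_add() */
		}
	}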

I'm also curious what's the use case you've got that is hitting
the problem?
David Wei Nov. 21, 2024, 12:52 a.m. UTC | #2
On 2024-11-20 15:56, Pavel Begunkov wrote:
> On 11/20/24 22:14, David Wei wrote:
>> Instead of eagerly running all available local tw, limit the amount of
>> local tw done to the max of IO_LOCAL_TW_DEFAULT_MAX (20) or wait_nr. The
>> value of 20 is chosen as a reasonable heuristic to allow enough work
>> batching but also keep latency down.
>>
>> Add a retry_llist that maintains a list of local tw that couldn't be
>> done in time. No synchronisation is needed since it is only modified
>> within the task context.
>>
>> Signed-off-by: David Wei <dw@davidwei.uk>
>> ---
>>   include/linux/io_uring_types.h |  1 +
>>   io_uring/io_uring.c            | 43 +++++++++++++++++++++++++---------
>>   io_uring/io_uring.h            |  2 +-
>>   3 files changed, 34 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
>> index 593c10a02144..011860ade268 100644
>> --- a/include/linux/io_uring_types.h
>> +++ b/include/linux/io_uring_types.h
>> @@ -336,6 +336,7 @@ struct io_ring_ctx {
>>        */
>>       struct {
>>           struct llist_head    work_llist;
>> +        struct llist_head    retry_llist;
> 
> Fwiw, probably doesn't matter, but it doesn't even need
> to be atomic, it's queued and spliced while holding
> ->uring_lock, the pending check is also synchronised as
> there is only one possible task doing that.

Yeah, it doesn't. Keeping it as a llist_head means being able to re-use
helpers that take llist_head or llist_node.

> 
>>           unsigned long        check_cq;
>>           atomic_t        cq_wait_nr;
>>           atomic_t        cq_timeouts;
>> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
>> index 83bf041d2648..c3a7d0197636 100644
>> --- a/io_uring/io_uring.c
>> +++ b/io_uring/io_uring.c
>> @@ -121,6 +121,7 @@
> ...
>>   static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
>>                      int min_events)
>>   {
>>       struct llist_node *node;
>>       unsigned int loops = 0;
>> -    int ret = 0;
>> +    int ret, limit;
>>         if (WARN_ON_ONCE(ctx->submitter_task != current))
>>           return -EEXIST;
>>       if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
>>           atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
>> +    limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);
>>   again:
>> +    ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, limit);
>> +    if (ctx->retry_llist.first)
>> +        goto retry_done;
>> +
>>       /*
>>        * llists are in reverse order, flip it back the right way before
>>        * running the pending items.
>>        */
>>       node = llist_reverse_order(llist_del_all(&ctx->work_llist));
>> -    while (node) {
>> -        struct llist_node *next = node->next;
>> -        struct io_kiocb *req = container_of(node, struct io_kiocb,
>> -                            io_task_work.node);
>> -        INDIRECT_CALL_2(req->io_task_work.func,
>> -                io_poll_task_func, io_req_rw_complete,
>> -                req, ts);
>> -        ret++;
>> -        node = next;
>> -    }
>> +    ret = __io_run_local_work_loop(&node, ts, ret);
> 
> One thing that is not so nice is that now we have this handling and
> checks in the hot path, and __io_run_local_work_loop() most likely
> gets uninlined.
> 
> I wonder, can we just requeue it via task_work again? We can even
> add a variant efficiently adding a list instead of a single entry,
> i.e. local_task_work_add(head, tail, ...);

That was an early idea, but it means re-reversing the list and then
atomically adding each node back to work_llist concurrently with e.g.
io_req_local_work_add().

Using a separate retry_llist means we don't need to concurrently add to
either retry_llist or work_llist.

> 
> I'm also curious what's the use case you've got that is hitting
> the problem?
> 

There is a Memcache-like workload that has load shedding based on the
time spent doing work. With epoll, the work of reading sockets and
processing a request is done in userspace, which can decide after some
amount of time to drop the remaining work if it is taking too long. With
io_uring, the work of reading sockets is done eagerly inside task work.
If there is a burst of work, then so much time is spent in task work
reading from sockets that, by the time control returns to userspace, the
timeout has already elapsed.
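
Roughly, the epoll-based shedding loop being described looks like this
(purely illustrative application code; handle(), now_ns() and budget_ns
are hypothetical):

	uint64_t start = now_ns();
	int n = epoll_wait(epfd, events, MAX_EVENTS, timeout_ms);

	for (int i = 0; i < n; i++) {
		if (now_ns() - start > budget_ns)
			break;			/* shed the remaining work */
		handle(&events[i]);		/* read socket + process request */
	}

	/* with DEFER_TASKRUN, the socket reads happen in task work before
	 * control gets back here, so the elapsed-time check comes too late */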
Jens Axboe Nov. 21, 2024, 1:12 a.m. UTC | #3
On 11/20/24 4:56 PM, Pavel Begunkov wrote:
> On 11/20/24 22:14, David Wei wrote:
>> Instead of eagerly running all available local tw, limit the amount of
>> local tw done to the max of IO_LOCAL_TW_DEFAULT_MAX (20) or wait_nr. The
>> value of 20 is chosen as a reasonable heuristic to allow enough work
>> batching but also keep latency down.
>>
>> Add a retry_llist that maintains a list of local tw that couldn't be
>> done in time. No synchronisation is needed since it is only modified
>> within the task context.
>>
>> Signed-off-by: David Wei <dw@davidwei.uk>
>> ---
>>   include/linux/io_uring_types.h |  1 +
>>   io_uring/io_uring.c            | 43 +++++++++++++++++++++++++---------
>>   io_uring/io_uring.h            |  2 +-
>>   3 files changed, 34 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
>> index 593c10a02144..011860ade268 100644
>> --- a/include/linux/io_uring_types.h
>> +++ b/include/linux/io_uring_types.h
>> @@ -336,6 +336,7 @@ struct io_ring_ctx {
>>        */
>>       struct {
>>           struct llist_head    work_llist;
>> +        struct llist_head    retry_llist;
> 
> Fwiw, probably doesn't matter, but it doesn't even need
> to be atomic, it's queued and spliced while holding
> ->uring_lock, the pending check is also synchronised as
> there is only one possible task doing that.
> 
>>           unsigned long        check_cq;
>>           atomic_t        cq_wait_nr;
>>           atomic_t        cq_timeouts;
>> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
>> index 83bf041d2648..c3a7d0197636 100644
>> --- a/io_uring/io_uring.c
>> +++ b/io_uring/io_uring.c
>> @@ -121,6 +121,7 @@
> ...
>>   static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
>>                      int min_events)
>>   {
>>       struct llist_node *node;
>>       unsigned int loops = 0;
>> -    int ret = 0;
>> +    int ret, limit;
>>         if (WARN_ON_ONCE(ctx->submitter_task != current))
>>           return -EEXIST;
>>       if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
>>           atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
>> +    limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);
>>   again:
>> +    ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, limit);
>> +    if (ctx->retry_llist.first)
>> +        goto retry_done;
>> +
>>       /*
>>        * llists are in reverse order, flip it back the right way before
>>        * running the pending items.
>>        */
>>       node = llist_reverse_order(llist_del_all(&ctx->work_llist));
>> -    while (node) {
>> -        struct llist_node *next = node->next;
>> -        struct io_kiocb *req = container_of(node, struct io_kiocb,
>> -                            io_task_work.node);
>> -        INDIRECT_CALL_2(req->io_task_work.func,
>> -                io_poll_task_func, io_req_rw_complete,
>> -                req, ts);
>> -        ret++;
>> -        node = next;
>> -    }
>> +    ret = __io_run_local_work_loop(&node, ts, ret);
> 
> One thing that is not so nice is that now we have this handling and
> checks in the hot path, and __io_run_local_work_loop() most likely
> gets uninlined.

I don't think that really matters, it's pretty light. The main overhead
in this function is not the call, it's reordering requests and touching
cachelines of the requests.

I think it's pretty light as-is and actually looks pretty good. It's
also similar to how sqpoll bites over longer task_work lines, and
arguably a mistake that we allow huge depths of this when we can avoid
it with deferred task_work.

> I wonder, can we just requeue it via task_work again? We can even
> add a variant efficiently adding a list instead of a single entry,
> i.e. local_task_work_add(head, tail, ...);

I think that can only work if we change work_llist to be a regular list
with regular locking. Otherwise it's a bit of a mess with the list being
reordered, and then you're spending extra cycles on potentially
reordering all the entries again.
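
A sketch of that alternative, with hypothetical field names: a spinlocked
list_head keeps entries in submission order, so leftovers could be
spliced straight back to the front, at the cost of taking a lock in the
producer path:

	struct {
		spinlock_t		lock;
		struct list_head	list;
	} local_work;

	/* producer side (io_req_local_work_add() equivalent; irq safety
	 * and wakeups glossed over) */
	spin_lock(&local_work.lock);
	list_add_tail(&req->work_node, &local_work.list);
	spin_unlock(&local_work.lock);

	/* requeue leftovers, already in order, back to the front */
	spin_lock(&local_work.lock);
	list_splice(&leftovers, &local_work.list);
	spin_unlock(&local_work.lock);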

> I'm also curious what's the use case you've got that is hitting
> the problem?

I'll let David answer that one, but some task_work can take a while to
run, eg if it's not just posting a completion.

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 593c10a02144..011860ade268 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -336,6 +336,7 @@ struct io_ring_ctx {
 	 */
 	struct {
 		struct llist_head	work_llist;
+		struct llist_head	retry_llist;
 		unsigned long		check_cq;
 		atomic_t		cq_wait_nr;
 		atomic_t		cq_timeouts;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 83bf041d2648..c3a7d0197636 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -121,6 +121,7 @@ 
 
 #define IO_COMPL_BATCH			32
 #define IO_REQ_ALLOC_BATCH		8
+#define IO_LOCAL_TW_DEFAULT_MAX		20
 
 struct io_defer_entry {
 	struct list_head	list;
@@ -1255,6 +1256,8 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 	struct llist_node *node = llist_del_all(&ctx->work_llist);
 
 	__io_fallback_tw(node, false);
+	node = llist_del_all(&ctx->retry_llist);
+	__io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
@@ -1269,37 +1272,55 @@ static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
 	return false;
 }
 
+static int __io_run_local_work_loop(struct llist_node **node,
+				    struct io_tw_state *ts,
+				    int events)
+{
+	while (*node) {
+		struct llist_node *next = (*node)->next;
+		struct io_kiocb *req = container_of(*node, struct io_kiocb,
+						    io_task_work.node);
+		INDIRECT_CALL_2(req->io_task_work.func,
+				io_poll_task_func, io_req_rw_complete,
+				req, ts);
+		*node = next;
+		if (--events <= 0)
+			break;
+	}
+
+	return events;
+}
+
 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
 			       int min_events)
 {
 	struct llist_node *node;
 	unsigned int loops = 0;
-	int ret = 0;
+	int ret, limit;
 
 	if (WARN_ON_ONCE(ctx->submitter_task != current))
 		return -EEXIST;
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+	limit = max(IO_LOCAL_TW_DEFAULT_MAX, min_events);
 again:
+	ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, limit);
+	if (ctx->retry_llist.first)
+		goto retry_done;
+
 	/*
 	 * llists are in reverse order, flip it back the right way before
 	 * running the pending items.
 	 */
 	node = llist_reverse_order(llist_del_all(&ctx->work_llist));
-	while (node) {
-		struct llist_node *next = node->next;
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    io_task_work.node);
-		INDIRECT_CALL_2(req->io_task_work.func,
-				io_poll_task_func, io_req_rw_complete,
-				req, ts);
-		ret++;
-		node = next;
-	}
+	ret = __io_run_local_work_loop(&node, ts, ret);
+	ctx->retry_llist.first = node;
 	loops++;
 
+	ret = limit - ret;
 	if (io_run_local_work_continue(ctx, ret, min_events))
 		goto again;
+retry_done:
 	io_submit_flush_completions(ctx);
 	if (io_run_local_work_continue(ctx, ret, min_events))
 		goto again;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 69eb3b23a5a0..12abee607e4a 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -349,7 +349,7 @@ static inline int io_run_task_work(void)
 
 static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
 {
-	return !llist_empty(&ctx->work_llist);
+	return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
 }
 
 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)