diff mbox series

[for-next,4/7] io_uring: do not always run task work at the start of io_uring_enter

Message ID 20220815130911.988014-5-dylany@fb.com (mailing list archive)
State New
Headers show
Series io_uring: defer task work to when it is needed | expand

Commit Message

Dylan Yudaken Aug. 15, 2022, 1:09 p.m. UTC
It is normally better to wait for task work until after submissions. This
will allow greater batching if either work arrives in the meanwhile, or if
the submissions cause task work to be queued up.

For SQPOLL this also no longer runs task work, but this is handled inside
the SQPOLL loop anyway.

For IOPOLL, io_iopoll_check will run task work anyway.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
---
 io_uring/io_uring.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

Comments

Pavel Begunkov Aug. 15, 2022, 1:50 p.m. UTC | #1
On 8/15/22 14:09, Dylan Yudaken wrote:
> It is normally better to wait for task work until after submissions. This
> will allow greater batching if either work arrives in the meanwhile, or if
> the submissions cause task work to be queued up.
> 
> For SQPOLL this also no longer runs task work, but this is handled inside
> the SQPOLL loop anyway.
> 
> For IOPOLL io_iopoll_check will run task work anyway

It's here to free resources (e.g. io_kiocb, so they can be reused in the
next submission) and so on, but we don't care much. Running them after
submission doesn't make much difference: either we go to cq_wait, which
will run them for us, or we exit and they'll be executed then anyway.

In short, instead of moving it we can just kill it.

> Signed-off-by: Dylan Yudaken <dylany@fb.com>
> ---
>   io_uring/io_uring.c | 6 ++++--
>   1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 8cc4b28b1725..3b08369c3c60 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -2990,8 +2990,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
>   	struct fd f;
>   	long ret;
>   
> -	io_run_task_work();
> -
>   	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
>   			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
>   			       IORING_ENTER_REGISTERED_RING)))
> @@ -3060,7 +3058,11 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
>   		if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
>   			goto iopoll_locked;
>   		mutex_unlock(&ctx->uring_lock);
> +		io_run_task_work();
> +	} else {
> +		io_run_task_work();
>   	}
> +
>   	if (flags & IORING_ENTER_GETEVENTS) {
>   		int ret2;
>   		if (ctx->syscall_iopoll) {
diff mbox series

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8cc4b28b1725..3b08369c3c60 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2990,8 +2990,6 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	struct fd f;
 	long ret;
 
-	io_run_task_work();
-
 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
 			       IORING_ENTER_REGISTERED_RING)))
@@ -3060,7 +3058,11 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
 			goto iopoll_locked;
 		mutex_unlock(&ctx->uring_lock);
+		io_run_task_work();
+	} else {
+		io_run_task_work();
 	}
+
 	if (flags & IORING_ENTER_GETEVENTS) {
 		int ret2;
 		if (ctx->syscall_iopoll) {