diff mbox series

[5.19] io_uring: fix req->apoll_events

Message ID 0aef40399ba75b1a4d2c2e85e6e8fd93c02fc6e4.1655814213.git.asml.silence@gmail.com (mailing list archive)
State New
Headers show
Series [5.19] io_uring: fix req->apoll_events | expand

Commit Message

Pavel Begunkov June 21, 2022, 12:25 p.m. UTC
apoll_events should be set once at the beginning of poll arming, just as
poll->events is, and should not change afterwards. However, io_uring
currently resets it on each __io_poll_execute() for no clear reason.
There is also a place in __io_arm_poll_handler() where we add
EPOLLONESHOT to downgrade a multishot, but forget to apply the same
change to ->apoll_events, which is buggy.

Fixes: 81459350d581e ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

Comments

Pavel Begunkov June 21, 2022, 12:28 p.m. UTC | #1
On 6/21/22 13:25, Pavel Begunkov wrote:
> apoll_events should be set once in the beginning of poll arming just as
> poll->events and not change after. However, currently io_uring resets it
> on each __io_poll_execute() for no clear reason. There is also a place
> in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade a
> multishot, but forget to do the same thing with ->apoll_events, which is
> buggy.
> 
> Fixes: 81459350d581e ("io_uring: cache req->apoll->events in req->cflags")
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>   fs/io_uring.c | 12 ++++++++----
>   1 file changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 87c65a358678..ebda9a565fc0 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -6954,7 +6954,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
>   		io_req_complete_failed(req, ret);
>   }
>   
> -static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
> +static void __io_poll_execute(struct io_kiocb *req, int mask,
> +			      __poll_t __maybe_unused events)

Killing @events would add extra rebase conflicts, so left it here and will
send a clean up for 5.20.

>   {
>   	req->cqe.res = mask;
>   	/*
> @@ -6963,7 +6964,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
>   	 * CPU. We want to avoid pulling in req->apoll->events for that
>   	 * case.
>   	 */
> -	req->apoll_events = events;
>   	if (req->opcode == IORING_OP_POLL_ADD)
>   		req->io_task_work.func = io_poll_task_func;
>   	else
> @@ -7114,6 +7114,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
>   	io_init_poll_iocb(poll, mask, io_poll_wake);
>   	poll->file = req->file;
>   
> +	req->apoll_events = poll->events;
> +
>   	ipt->pt._key = mask;
>   	ipt->req = req;
>   	ipt->error = 0;
> @@ -7144,8 +7146,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
>   
>   	if (mask) {
>   		/* can't multishot if failed, just queue the event we've got */
> -		if (unlikely(ipt->error || !ipt->nr_entries))
> +		if (unlikely(ipt->error || !ipt->nr_entries)) {
>   			poll->events |= EPOLLONESHOT;
> +			req->apoll_events |= EPOLLONESHOT;
> +		}
>   		__io_poll_execute(req, mask, poll->events);
>   		return 0;
>   	}
> @@ -7392,7 +7396,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
>   		return -EINVAL;
>   
>   	io_req_set_refcount(req);
> -	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
> +	poll->events = io_poll_parse_events(sqe, flags);
>   	return 0;
>   }
>
Hao Xu June 21, 2022, 1:28 p.m. UTC | #2
On 6/21/22 20:25, Pavel Begunkov wrote:
> apoll_events should be set once in the beginning of poll arming just as
> poll->events and not change after. However, currently io_uring resets it
> on each __io_poll_execute() for no clear reason. There is also a place
> in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade a
> multishot, but forget to do the same thing with ->apoll_events, which is
> buggy.
> 
> Fixes: 81459350d581e ("io_uring: cache req->apoll->events in req->cflags")
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>   fs/io_uring.c | 12 ++++++++----
>   1 file changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 87c65a358678..ebda9a565fc0 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -6954,7 +6954,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
>   		io_req_complete_failed(req, ret);
>   }
>   
> -static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
> +static void __io_poll_execute(struct io_kiocb *req, int mask,
> +			      __poll_t __maybe_unused events)
>   {
>   	req->cqe.res = mask;
>   	/*
> @@ -6963,7 +6964,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
>   	 * CPU. We want to avoid pulling in req->apoll->events for that
>   	 * case.
>   	 */
> -	req->apoll_events = events;
>   	if (req->opcode == IORING_OP_POLL_ADD)
>   		req->io_task_work.func = io_poll_task_func;
>   	else
> @@ -7114,6 +7114,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
>   	io_init_poll_iocb(poll, mask, io_poll_wake);
>   	poll->file = req->file;
>   
> +	req->apoll_events = poll->events;
> +
>   	ipt->pt._key = mask;
>   	ipt->req = req;
>   	ipt->error = 0;
> @@ -7144,8 +7146,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
>   
>   	if (mask) {
>   		/* can't multishot if failed, just queue the event we've got */
> -		if (unlikely(ipt->error || !ipt->nr_entries))
> +		if (unlikely(ipt->error || !ipt->nr_entries)) {
>   			poll->events |= EPOLLONESHOT;
> +			req->apoll_events |= EPOLLONESHOT;
> +		}
>   		__io_poll_execute(req, mask, poll->events);
>   		return 0;
>   	}
> @@ -7392,7 +7396,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
>   		return -EINVAL;
>   
>   	io_req_set_refcount(req);
> -	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
> +	poll->events = io_poll_parse_events(sqe, flags);
>   	return 0;
>   }
>   

Makes sense,
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Jens Axboe June 21, 2022, 1:49 p.m. UTC | #3
On Tue, 21 Jun 2022 13:25:06 +0100, Pavel Begunkov wrote:
> apoll_events should be set once in the beginning of poll arming just as
> poll->events and not change after. However, currently io_uring resets it
> on each __io_poll_execute() for no clear reason. There is also a place
> in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade a
> multishot, but forget to do the same thing with ->apoll_events, which is
> buggy.
> 
> [...]

Applied, thanks!

[1/1] io_uring: fix req->apoll_events
      commit: aacf2f9f382c91df73f33317e28a4c34c8038986

Best regards,
diff mbox series

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 87c65a358678..ebda9a565fc0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6954,7 +6954,8 @@  static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 		io_req_complete_failed(req, ret);
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
+static void __io_poll_execute(struct io_kiocb *req, int mask,
+			      __poll_t __maybe_unused events)
 {
 	req->cqe.res = mask;
 	/*
@@ -6963,7 +6964,6 @@  static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
 	 * CPU. We want to avoid pulling in req->apoll->events for that
 	 * case.
 	 */
-	req->apoll_events = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -7114,6 +7114,8 @@  static int __io_arm_poll_handler(struct io_kiocb *req,
 	io_init_poll_iocb(poll, mask, io_poll_wake);
 	poll->file = req->file;
 
+	req->apoll_events = poll->events;
+
 	ipt->pt._key = mask;
 	ipt->req = req;
 	ipt->error = 0;
@@ -7144,8 +7146,10 @@  static int __io_arm_poll_handler(struct io_kiocb *req,
 
 	if (mask) {
 		/* can't multishot if failed, just queue the event we've got */
-		if (unlikely(ipt->error || !ipt->nr_entries))
+		if (unlikely(ipt->error || !ipt->nr_entries)) {
 			poll->events |= EPOLLONESHOT;
+			req->apoll_events |= EPOLLONESHOT;
+		}
 		__io_poll_execute(req, mask, poll->events);
 		return 0;
 	}
@@ -7392,7 +7396,7 @@  static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
+	poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }