
[v3,1/3] io_uring: allocate io_kiocb upfront

Message ID b787787e16af14f03df2ee1ac0d57b81b367cb4c.1573079844.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Series: Inline sqe_submit

Commit Message

Pavel Begunkov Nov. 6, 2019, 10:41 p.m. UTC
Let io_submit_sqes() allocate the io_kiocb before fetching an sqe.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
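
A condensed sketch of the reordering this patch makes (derived from the hunks below, not a verbatim excerpt): previously the sqe was fetched first and the io_kiocb allocated inside io_submit_sqe(); now the request is allocated up front in the submission loop.

```c
/* Sketch of the reordered submission loop (condensed from this patch's hunks):
 * allocate the io_kiocb first, then fetch the sqe; if no sqe is available,
 * the freshly allocated request is released again. */
for (i = 0; i < nr; i++) {
	struct sqe_submit s;
	struct io_kiocb *req;

	req = io_get_req(ctx, statep);		/* allocate upfront */
	if (unlikely(!req))
		break;
	if (!io_get_sqring(ctx, &s)) {
		__io_free_req(req);		/* no sqe to pair with the request */
		break;
	}
	io_submit_sqe(ctx, req, &s, statep, &link);	/* req passed in, not allocated inside */
	submitted++;
}
```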

Comments

Pavel Begunkov Nov. 6, 2019, 10:47 p.m. UTC | #1
On 07/11/2019 01:41, Pavel Begunkov wrote:
> Let io_submit_sqes() allocate the io_kiocb before fetching an sqe.
> 
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>  fs/io_uring.c | 27 ++++++++++++++-------------
>  1 file changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 6524898831e0..ceb616dbe710 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2551,30 +2551,23 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  
>  #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
>  
> -static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
> -			  struct io_submit_state *state, struct io_kiocb **link)
> +static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
> +			  struct sqe_submit *s, struct io_submit_state *state,
> +			  struct io_kiocb **link)
>  {
>  	struct io_uring_sqe *sqe_copy;
> -	struct io_kiocb *req;
>  	int ret;
>  
>  	/* enforce forwards compatibility on users */
>  	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
>  		ret = -EINVAL;
> -		goto err;
> -	}
> -
> -	req = io_get_req(ctx, state);
> -	if (unlikely(!req)) {
> -		ret = -EAGAIN;
> -		goto err;
> +		goto err_req;
>  	}
>  
>  	ret = io_req_set_file(ctx, s, state, req);
>  	if (unlikely(ret)) {
>  err_req:
>  		io_free_req(req, NULL);
> -err:
>  		io_cqring_add_event(ctx, s->sqe->user_data, ret);
>  		return;
>  	}
> @@ -2710,9 +2703,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  
>  	for (i = 0; i < nr; i++) {
>  		struct sqe_submit s;
> +		struct io_kiocb *req;
>  
> -		if (!io_get_sqring(ctx, &s))
> +		req = io_get_req(ctx, statep);
> +		if (unlikely(!req))
>  			break;
> +		if (!io_get_sqring(ctx, &s)) {
> +			__io_free_req(req);
> +			break;
> +		}
>  
>  		if (io_sqe_needs_user(s.sqe) && !*mm) {
>  			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
> @@ -2740,7 +2739,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  		s.in_async = async;
>  		s.needs_fixed_file = async;
>  		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
> -		io_submit_sqe(ctx, &s, statep, &link);
> +		io_submit_sqe(ctx, req, &s, statep, &link);
>  		submitted++;
>  
>  		/*
> @@ -4009,6 +4008,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
>  		cur_mm = ctx->sqo_mm;
>  		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
>  					   &cur_mm, false);
> +		if (!submitted)
> +			submitted = -EAGAIN;

I should mention that this leaves io_submit_sqes() errors with higher priority
than those from poll/wait.


>  		mutex_unlock(&ctx->uring_lock);
>  	}
>  	if (flags & IORING_ENTER_GETEVENTS) {
>
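
To illustrate the error-priority remark above, a minimal sketch, assuming io_uring_enter() ends with an exit path along the lines of `return submitted ? submitted : ret;` (that return is not part of this hunk): once submitted is forced to -EAGAIN, the submission error is what the syscall reports, regardless of what poll/wait returned.

```c
/*
 * Minimal sketch (not the actual kernel code) of the intended priority:
 * a nonzero submission result, including the -EAGAIN forced above, is
 * reported in preference to the poll/wait result.
 */
static long enter_result(long submitted, long wait_ret)
{
	return submitted ? submitted : wait_ret;
}
```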

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6524898831e0..ceb616dbe710 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2551,30 +2551,23 @@  static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-			  struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s, struct io_submit_state *state,
+			  struct io_kiocb **link)
 {
 	struct io_uring_sqe *sqe_copy;
-	struct io_kiocb *req;
 	int ret;
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
 		ret = -EINVAL;
-		goto err;
-	}
-
-	req = io_get_req(ctx, state);
-	if (unlikely(!req)) {
-		ret = -EAGAIN;
-		goto err;
+		goto err_req;
 	}
 
 	ret = io_req_set_file(ctx, s, state, req);
 	if (unlikely(ret)) {
 err_req:
 		io_free_req(req, NULL);
-err:
 		io_cqring_add_event(ctx, s->sqe->user_data, ret);
 		return;
 	}
@@ -2710,9 +2703,15 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 
 	for (i = 0; i < nr; i++) {
 		struct sqe_submit s;
+		struct io_kiocb *req;
 
-		if (!io_get_sqring(ctx, &s))
+		req = io_get_req(ctx, statep);
+		if (unlikely(!req))
 			break;
+		if (!io_get_sqring(ctx, &s)) {
+			__io_free_req(req);
+			break;
+		}
 
 		if (io_sqe_needs_user(s.sqe) && !*mm) {
 			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
@@ -2740,7 +2739,7 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		s.in_async = async;
 		s.needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
-		io_submit_sqe(ctx, &s, statep, &link);
+		io_submit_sqe(ctx, req, &s, statep, &link);
 		submitted++;
 
 		/*
@@ -4009,6 +4008,8 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		cur_mm = ctx->sqo_mm;
 		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
 					   &cur_mm, false);
+		if (!submitted)
+			submitted = -EAGAIN;
 		mutex_unlock(&ctx->uring_lock);
 	}
 	if (flags & IORING_ENTER_GETEVENTS) {