
[v2,1/2] io_uring: Merge io_submit_sqes and io_ring_submit

Message ID 09fcce1a50f4d1a399b903e3669ba98ede408d9c.1572988512.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Series cleanup of submission path

Commit Message

Pavel Begunkov Nov. 5, 2019, 9:22 p.m. UTC
io_submit_sqes() and io_ring_submit() are doing the same stuff with
a little difference. Deduplicate them.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 88 +++++++++++----------------------------------------
 1 file changed, 18 insertions(+), 70 deletions(-)
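
For orientation: after this patch both submission paths funnel through a single helper, differing only in the ring file/fd they pass and the async flag. Condensed from the diff below, the two call sites are roughly:

	/* SQPOLL kernel thread (io_sq_thread): no ring file/fd, async semantics */
	inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);

	/* io_uring_enter() syscall path: caller's ring file/fd, sync semantics */
	cur_mm = ctx->sqo_mm;	/* mm is already set up, so it won't be grabbed */
	submitted = io_submit_sqes(ctx, to_submit, f.file, fd, &cur_mm, false);

Inside the helper, the same flag drives s.in_async and s.needs_fixed_file, which is what previously forced the two near-identical copies of the loop.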

Comments

Bob Liu Nov. 6, 2019, 8:57 a.m. UTC | #1
On 11/6/19 5:22 AM, Pavel Begunkov wrote:
> io_submit_sqes() and io_ring_submit() are doing the same stuff with
> a little difference. Deduplicate them.
> 
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
> ---
>  fs/io_uring.c | 88 +++++++++++----------------------------------------
>  1 file changed, 18 insertions(+), 70 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 7813bc7d5b61..ebe2a4edd644 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2681,7 +2681,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
>  }
>  
>  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
> -			  struct mm_struct **mm)
> +			  struct file *ring_file, int ring_fd,
> +			  struct mm_struct **mm, bool async)
>  {
>  	struct io_submit_state state, *statep = NULL;
>  	struct io_kiocb *link = NULL;
> @@ -2732,10 +2733,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  		}
>  
>  out:
> +		s.ring_file = ring_file;
> +		s.ring_fd = ring_fd;
>  		s.has_user = *mm != NULL;
> -		s.in_async = true;
> -		s.needs_fixed_file = true;
> -		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
> +		s.in_async = async;
> +		s.needs_fixed_file = async;
> +		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
>  		io_submit_sqe(ctx, &s, statep, &link);
>  		submitted++;
>  	}
> @@ -2745,6 +2748,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  	if (statep)
>  		io_submit_state_end(&state);
>  
> +	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
> +	io_commit_sqring(ctx);
> +

Then don't need io_commit_sqring() in io_sq_thread any more?

Anyway, looks good to me.
Reviewed-by: Bob Liu <bob.liu@oracle.com>

>  	return submitted;
>  }
>  
> @@ -2849,7 +2855,8 @@ static int io_sq_thread(void *data)
>  		}
>  
>  		to_submit = min(to_submit, ctx->sq_entries);
> -		inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
> +		inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
> +					   true);
>  
>  		/* Commit SQ ring head once we've consumed all SQEs */
>  		io_commit_sqring(ctx);
> @@ -2866,69 +2873,6 @@ static int io_sq_thread(void *data)
>  	return 0;
>  }
>  
> -static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
> -			  struct file *ring_file, int ring_fd)
> -{
> -	struct io_submit_state state, *statep = NULL;
> -	struct io_kiocb *link = NULL;
> -	struct io_kiocb *shadow_req = NULL;
> -	bool prev_was_link = false;
> -	int i, submit = 0;
> -
> -	if (to_submit > IO_PLUG_THRESHOLD) {
> -		io_submit_state_start(&state, ctx, to_submit);
> -		statep = &state;
> -	}
> -
> -	for (i = 0; i < to_submit; i++) {
> -		struct sqe_submit s;
> -
> -		if (!io_get_sqring(ctx, &s))
> -			break;
> -
> -		/*
> -		 * If previous wasn't linked and we have a linked command,
> -		 * that's the end of the chain. Submit the previous link.
> -		 */
> -		if (!prev_was_link && link) {
> -			io_queue_link_head(ctx, link, &link->submit, shadow_req);
> -			link = NULL;
> -			shadow_req = NULL;
> -		}
> -		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
> -
> -		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
> -			if (!shadow_req) {
> -				shadow_req = io_get_req(ctx, NULL);
> -				if (unlikely(!shadow_req))
> -					goto out;
> -				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
> -				refcount_dec(&shadow_req->refs);
> -			}
> -			shadow_req->sequence = s.sequence;
> -		}
> -
> -out:
> -		s.ring_file = ring_file;
> -		s.has_user = true;
> -		s.in_async = false;
> -		s.needs_fixed_file = false;
> -		s.ring_fd = ring_fd;
> -		submit++;
> -		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
> -		io_submit_sqe(ctx, &s, statep, &link);
> -	}
> -
> -	if (link)
> -		io_queue_link_head(ctx, link, &link->submit, shadow_req);
> -	if (statep)
> -		io_submit_state_end(statep);
> -
> -	io_commit_sqring(ctx);
> -
> -	return submit;
> -}
> -
>  struct io_wait_queue {
>  	struct wait_queue_entry wq;
>  	struct io_ring_ctx *ctx;
> @@ -4049,10 +3993,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
>  			wake_up(&ctx->sqo_wait);
>  		submitted = to_submit;
>  	} else if (to_submit) {
> -		to_submit = min(to_submit, ctx->sq_entries);
> +		struct mm_struct *cur_mm;
>  
> +		to_submit = min(to_submit, ctx->sq_entries);
>  		mutex_lock(&ctx->uring_lock);
> -		submitted = io_ring_submit(ctx, to_submit, f.file, fd);
> +		/* already have mm, so io_submit_sqes() won't try to grab it */
> +		cur_mm = ctx->sqo_mm;
> +		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
> +					   &cur_mm, false);
>  		mutex_unlock(&ctx->uring_lock);
>  	}
>  	if (flags & IORING_ENTER_GETEVENTS) {
>
Pavel Begunkov Nov. 6, 2019, 9:07 a.m. UTC | #2
On 11/6/2019 11:57 AM, Bob Liu wrote:
> On 11/6/19 5:22 AM, Pavel Begunkov wrote:
>> io_submit_sqes() and io_ring_submit() are doing the same stuff with
>> a little difference. Deduplicate them.
>>
>> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
>> ---
>>  fs/io_uring.c | 88 +++++++++++----------------------------------------
>>  1 file changed, 18 insertions(+), 70 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 7813bc7d5b61..ebe2a4edd644 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -2681,7 +2681,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
>>  }
>>  
>>  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>> -			  struct mm_struct **mm)
>> +			  struct file *ring_file, int ring_fd,
>> +			  struct mm_struct **mm, bool async)
>>  {
>>  	struct io_submit_state state, *statep = NULL;
>>  	struct io_kiocb *link = NULL;
>> @@ -2732,10 +2733,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>>  		}
>>  
>>  out:
>> +		s.ring_file = ring_file;
>> +		s.ring_fd = ring_fd;
>>  		s.has_user = *mm != NULL;
>> -		s.in_async = true;
>> -		s.needs_fixed_file = true;
>> -		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
>> +		s.in_async = async;
>> +		s.needs_fixed_file = async;
>> +		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
>>  		io_submit_sqe(ctx, &s, statep, &link);
>>  		submitted++;
>>  	}
>> @@ -2745,6 +2748,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>>  	if (statep)
>>  		io_submit_state_end(&state);
>>  
>> +	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
>> +	io_commit_sqring(ctx);
>> +
> 
> Then don't need io_commit_sqring() in io_sq_thread any more?
>
Right, thanks! I'll resend with the change.


> Anyway, looks good to me.
> Reviewed-by: Bob Liu <bob.liu@oracle.com>
> 
>>  	return submitted;
>>  }
>>  
>> @@ -2849,7 +2855,8 @@ static int io_sq_thread(void *data)
>>  		}
>>  
>>  		to_submit = min(to_submit, ctx->sq_entries);
>> -		inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
>> +		inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
>> +					   true);
>>  
>>  		/* Commit SQ ring head once we've consumed all SQEs */
>>  		io_commit_sqring(ctx);
>> @@ -2866,69 +2873,6 @@ static int io_sq_thread(void *data)
>>  	return 0;
>>  }
>>  
>> -static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
>> -			  struct file *ring_file, int ring_fd)
>> -{
>> -	struct io_submit_state state, *statep = NULL;
>> -	struct io_kiocb *link = NULL;
>> -	struct io_kiocb *shadow_req = NULL;
>> -	bool prev_was_link = false;
>> -	int i, submit = 0;
>> -
>> -	if (to_submit > IO_PLUG_THRESHOLD) {
>> -		io_submit_state_start(&state, ctx, to_submit);
>> -		statep = &state;
>> -	}
>> -
>> -	for (i = 0; i < to_submit; i++) {
>> -		struct sqe_submit s;
>> -
>> -		if (!io_get_sqring(ctx, &s))
>> -			break;
>> -
>> -		/*
>> -		 * If previous wasn't linked and we have a linked command,
>> -		 * that's the end of the chain. Submit the previous link.
>> -		 */
>> -		if (!prev_was_link && link) {
>> -			io_queue_link_head(ctx, link, &link->submit, shadow_req);
>> -			link = NULL;
>> -			shadow_req = NULL;
>> -		}
>> -		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
>> -
>> -		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
>> -			if (!shadow_req) {
>> -				shadow_req = io_get_req(ctx, NULL);
>> -				if (unlikely(!shadow_req))
>> -					goto out;
>> -				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
>> -				refcount_dec(&shadow_req->refs);
>> -			}
>> -			shadow_req->sequence = s.sequence;
>> -		}
>> -
>> -out:
>> -		s.ring_file = ring_file;
>> -		s.has_user = true;
>> -		s.in_async = false;
>> -		s.needs_fixed_file = false;
>> -		s.ring_fd = ring_fd;
>> -		submit++;
>> -		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
>> -		io_submit_sqe(ctx, &s, statep, &link);
>> -	}
>> -
>> -	if (link)
>> -		io_queue_link_head(ctx, link, &link->submit, shadow_req);
>> -	if (statep)
>> -		io_submit_state_end(statep);
>> -
>> -	io_commit_sqring(ctx);
>> -
>> -	return submit;
>> -}
>> -
>>  struct io_wait_queue {
>>  	struct wait_queue_entry wq;
>>  	struct io_ring_ctx *ctx;
>> @@ -4049,10 +3993,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
>>  			wake_up(&ctx->sqo_wait);
>>  		submitted = to_submit;
>>  	} else if (to_submit) {
>> -		to_submit = min(to_submit, ctx->sq_entries);
>> +		struct mm_struct *cur_mm;
>>  
>> +		to_submit = min(to_submit, ctx->sq_entries);
>>  		mutex_lock(&ctx->uring_lock);
>> -		submitted = io_ring_submit(ctx, to_submit, f.file, fd);
>> +		/* already have mm, so io_submit_sqes() won't try to grab it */
>> +		cur_mm = ctx->sqo_mm;
>> +		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
>> +					   &cur_mm, false);
>>  		mutex_unlock(&ctx->uring_lock);
>>  	}
>>  	if (flags & IORING_ENTER_GETEVENTS) {
>>
>
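
As Bob Liu points out and Pavel confirms above, once io_submit_sqes() commits the SQ ring head itself, the explicit io_commit_sqring() call left in io_sq_thread() becomes redundant. A minimal sketch of what the promised resend might additionally drop (an assumption for illustration, not part of this v2 patch; the actual follow-up hunk may differ):

 		to_submit = min(to_submit, ctx->sq_entries);
 		inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
 					   true);
 
-		/* Commit SQ ring head once we've consumed all SQEs */
-		io_commit_sqring(ctx);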

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7813bc7d5b61..ebe2a4edd644 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2681,7 +2681,8 @@  static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  struct mm_struct **mm)
+			  struct file *ring_file, int ring_fd,
+			  struct mm_struct **mm, bool async)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
@@ -2732,10 +2733,12 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		}
 
 out:
+		s.ring_file = ring_file;
+		s.ring_fd = ring_fd;
 		s.has_user = *mm != NULL;
-		s.in_async = true;
-		s.needs_fixed_file = true;
-		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
+		s.in_async = async;
+		s.needs_fixed_file = async;
+		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
 		io_submit_sqe(ctx, &s, statep, &link);
 		submitted++;
 	}
@@ -2745,6 +2748,9 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (statep)
 		io_submit_state_end(&state);
 
+	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
+	io_commit_sqring(ctx);
+
 	return submitted;
 }
 
@@ -2849,7 +2855,8 @@  static int io_sq_thread(void *data)
 		}
 
 		to_submit = min(to_submit, ctx->sq_entries);
-		inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
+		inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
+					   true);
 
 		/* Commit SQ ring head once we've consumed all SQEs */
 		io_commit_sqring(ctx);
@@ -2866,69 +2873,6 @@  static int io_sq_thread(void *data)
 	return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-			  struct file *ring_file, int ring_fd)
-{
-	struct io_submit_state state, *statep = NULL;
-	struct io_kiocb *link = NULL;
-	struct io_kiocb *shadow_req = NULL;
-	bool prev_was_link = false;
-	int i, submit = 0;
-
-	if (to_submit > IO_PLUG_THRESHOLD) {
-		io_submit_state_start(&state, ctx, to_submit);
-		statep = &state;
-	}
-
-	for (i = 0; i < to_submit; i++) {
-		struct sqe_submit s;
-
-		if (!io_get_sqring(ctx, &s))
-			break;
-
-		/*
-		 * If previous wasn't linked and we have a linked command,
-		 * that's the end of the chain. Submit the previous link.
-		 */
-		if (!prev_was_link && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req);
-			link = NULL;
-			shadow_req = NULL;
-		}
-		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
-		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
-			if (!shadow_req) {
-				shadow_req = io_get_req(ctx, NULL);
-				if (unlikely(!shadow_req))
-					goto out;
-				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
-				refcount_dec(&shadow_req->refs);
-			}
-			shadow_req->sequence = s.sequence;
-		}
-
-out:
-		s.ring_file = ring_file;
-		s.has_user = true;
-		s.in_async = false;
-		s.needs_fixed_file = false;
-		s.ring_fd = ring_fd;
-		submit++;
-		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
-		io_submit_sqe(ctx, &s, statep, &link);
-	}
-
-	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req);
-	if (statep)
-		io_submit_state_end(statep);
-
-	io_commit_sqring(ctx);
-
-	return submit;
-}
-
 struct io_wait_queue {
 	struct wait_queue_entry wq;
 	struct io_ring_ctx *ctx;
@@ -4049,10 +3993,14 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			wake_up(&ctx->sqo_wait);
 		submitted = to_submit;
 	} else if (to_submit) {
-		to_submit = min(to_submit, ctx->sq_entries);
+		struct mm_struct *cur_mm;
 
+		to_submit = min(to_submit, ctx->sq_entries);
 		mutex_lock(&ctx->uring_lock);
-		submitted = io_ring_submit(ctx, to_submit, f.file, fd);
+		/* already have mm, so io_submit_sqes() won't try to grab it */
+		cur_mm = ctx->sqo_mm;
+		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+					   &cur_mm, false);
 		mutex_unlock(&ctx->uring_lock);
 	}
 	if (flags & IORING_ENTER_GETEVENTS) {