diff mbox series

[2/2] io_uring: merge io_submit_sqes and io_ring_submit

Message ID e6171fc9b7fa93d93d2b42032420147053c7eec8.1572189860.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Headers show
Series cleanup submission path | expand

Commit Message

Pavel Begunkov Oct. 27, 2019, 3:35 p.m. UTC
io_submit_sqes() and io_ring_submit() are mostly identical now, except
for several flags. And it's error-prone, as it usually requires
synchronously changing both of them.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 88 +++++++++------------------------------------------
 1 file changed, 15 insertions(+), 73 deletions(-)
diff mbox series

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f65727f2ba95..949faf14345e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2640,7 +2640,8 @@  static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-			  bool has_user)
+			  struct file *ring_file, int ring_fd,
+			  bool has_user, bool in_async)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
@@ -2682,10 +2683,12 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		}
 
 out:
+		s.ring_file = ring_file;
 		s.has_user = has_user;
-		s.in_async = true;
-		s.needs_fixed_file = true;
-		trace_io_uring_submit_sqe(ctx, true, true);
+		s.in_async = in_async;
+		s.needs_fixed_file = in_async;
+		s.ring_fd = ring_fd;
+		trace_io_uring_submit_sqe(ctx, true, in_async);
 		io_submit_sqe(ctx, &s, statep, &link);
 		submitted++;
 	}
@@ -2693,7 +2696,10 @@  static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (link)
 		io_queue_link_head(ctx, link, &link->submit, shadow_req);
 	if (statep)
-		io_submit_state_end(&state);
+		io_submit_state_end(statep);
+
+	/* Commit SQ ring head once we've consumed all SQEs */
+	io_commit_sqring(ctx);
 
 	return submitted;
 }
@@ -2822,10 +2828,8 @@  static int io_sq_thread(void *data)
 			io_fail_all_sqes(ctx);
 		} else {
 			to_submit = min(to_submit, ctx->sq_entries);
-			inflight += io_submit_sqes(ctx, to_submit,
-						   cur_mm != NULL);
-			/* Commit SQ ring head once we've consumed all SQEs */
-			io_commit_sqring(ctx);
+			inflight += io_submit_sqes(ctx, to_submit, NULL, -1,
+						   cur_mm != NULL, true);
 		}
 	}
 
@@ -2840,69 +2844,6 @@  static int io_sq_thread(void *data)
 	return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-			  struct file *ring_file, int ring_fd)
-{
-	struct io_submit_state state, *statep = NULL;
-	struct io_kiocb *link = NULL;
-	struct io_kiocb *shadow_req = NULL;
-	bool prev_was_link = false;
-	int i, submit = 0;
-
-	if (to_submit > IO_PLUG_THRESHOLD) {
-		io_submit_state_start(&state, ctx, to_submit);
-		statep = &state;
-	}
-
-	for (i = 0; i < to_submit; i++) {
-		struct sqe_submit s;
-
-		if (!io_get_sqring(ctx, &s))
-			break;
-
-		/*
-		 * If previous wasn't linked and we have a linked command,
-		 * that's the end of the chain. Submit the previous link.
-		 */
-		if (!prev_was_link && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req);
-			link = NULL;
-			shadow_req = NULL;
-		}
-		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
-		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
-			if (!shadow_req) {
-				shadow_req = io_get_req(ctx, NULL);
-				if (unlikely(!shadow_req))
-					goto out;
-				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
-				refcount_dec(&shadow_req->refs);
-			}
-			shadow_req->sequence = s.sequence;
-		}
-
-out:
-		s.ring_file = ring_file;
-		s.has_user = true;
-		s.in_async = false;
-		s.needs_fixed_file = false;
-		s.ring_fd = ring_fd;
-		submit++;
-		trace_io_uring_submit_sqe(ctx, true, false);
-		io_submit_sqe(ctx, &s, statep, &link);
-	}
-
-	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req);
-	if (statep)
-		io_submit_state_end(statep);
-
-	io_commit_sqring(ctx);
-
-	return submit;
-}
-
 struct io_wait_queue {
 	struct wait_queue_entry wq;
 	struct io_ring_ctx *ctx;
@@ -4027,7 +3968,8 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		to_submit = min(to_submit, ctx->sq_entries);
 
 		mutex_lock(&ctx->uring_lock);
-		submitted = io_ring_submit(ctx, to_submit, f.file, fd);
+		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+					   true, false);
 		mutex_unlock(&ctx->uring_lock);
 	}
 	if (flags & IORING_ENTER_GETEVENTS) {