[V6,02/17] io_uring: use ctx->cached_sq_head to calculate left sqes

Message ID 20230330113630.1388860-3-ming.lei@redhat.com (mailing list archive)
State New, archived
Series io_uring/ublk: add generic IORING_OP_FUSED_CMD

Commit Message

Ming Lei March 30, 2023, 11:36 a.m. UTC
Use ctx->cached_sq_head to calculate the number of left sqes, and prepare for
supporting fused requests, which need to fetch their req/sqe in their own
->prep() callback.

ctx->cached_sq_head is only ever advanced when an sqe is fetched, so deriving
the number of handled sqes from it is safe and this change is just fine.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/io_uring.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
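
The key point of the change is that 'left' is no longer counted down on every
iteration; instead, the number of consumed sqes is recovered from how far
ctx->cached_sq_head has advanced past a snapshot taken on entry. A minimal
user-space sketch of that accounting (only the names cached_sq_head, old_head,
ret and left are taken from the patch; the rest is illustrative, not kernel
code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cached_sq_head = 0xfffffffe;  /* near wraparound on purpose */
		unsigned int old_head = cached_sq_head;    /* snapshot taken on entry */
		unsigned int ret = 4;                      /* sqes we intend to submit */
		unsigned int left = 0;

		/* each iteration models io_get_sqe() advancing the cached head */
		do {
			cached_sq_head++;
			/* submit the sqe here; on a fatal failure: left = 1; break; */
		} while ((cached_sq_head - old_head) < ret);

		/* unsigned subtraction yields the consumed count even across wraparound */
		left = ret - (cached_sq_head - old_head) - left;

		printf("consumed=%u left=%u\n", cached_sq_head - old_head, left);
		return 0;
	}

Because the subtraction is done on unsigned 32-bit values, the head delta stays
correct even when cached_sq_head wraps around, so no extra per-iteration
bookkeeping is needed.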

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 693558c4b10b..25a940f0ab68 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2429,15 +2429,16 @@  int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	__must_hold(&ctx->uring_lock)
 {
 	unsigned int entries = io_sqring_entries(ctx);
-	unsigned int left;
+	unsigned old_head = ctx->cached_sq_head;
+	unsigned int left = 0;
 	int ret;
 
 	if (unlikely(!entries))
 		return 0;
 	/* make sure SQ entry isn't read before tail */
-	ret = left = min3(nr, ctx->sq_entries, entries);
-	io_get_task_refs(left);
-	io_submit_state_start(&ctx->submit_state, left);
+	ret = min3(nr, ctx->sq_entries, entries);
+	io_get_task_refs(ret);
+	io_submit_state_start(&ctx->submit_state, ret);
 
 	do {
 		const struct io_uring_sqe *sqe;
@@ -2456,11 +2457,12 @@  int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		 */
 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
-			left--;
+			left = 1;
 			break;
 		}
-	} while (--left);
+	} while ((ctx->cached_sq_head - old_head) < ret);
 
+	left = ret - (ctx->cached_sq_head - old_head) - left;
 	if (unlikely(left)) {
 		ret -= left;
 		/* try again if it submitted nothing and can't allocate a req */