@@ -2132,19 +2132,12 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
return 0;
}
 
-static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
- __must_hold(&ctx->uring_lock)
+/*
+ * Return NULL if nothing is to be queued, otherwise return the request for queueing
+ */
+static struct io_kiocb *io_link_sqe(struct io_submit_link *link,
+ struct io_kiocb *req)
{
- struct io_submit_link *link = &ctx->submit_state.link;
- int ret;
-
- ret = io_init_req(ctx, req, sqe);
- if (unlikely(ret))
- return io_submit_fail_init(sqe, req, ret);
-
- trace_io_uring_submit_req(req);
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -2158,7 +2151,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
link->last = req;
 
if (req->flags & IO_REQ_LINK_FLAGS)
- return 0;
+ return NULL;
/* last request of the link, flush it */
req = link->head;
link->head = NULL;
@@ -2174,9 +2167,30 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
fallback:
io_queue_sqe_fallback(req);
}
- return 0;
+ return NULL;
}
 
+ return req;
+}
+
+static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_submit_link *link = &ctx->submit_state.link;
+ int ret;
+ ret = io_init_req(ctx, req, sqe);
+ if (unlikely(ret))
+ return io_submit_fail_init(sqe, req, ret);
+
+ trace_io_uring_submit_req(req);
+
+ if (unlikely(link->head || (req->flags & (IO_REQ_LINK_FLAGS |
+ REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) {
+ req = io_link_sqe(link, req);
+ if (!req)
+ return 0;
+ }
io_queue_sqe(req);
return 0;
}
Add io_link_sqe() helper, so that io_submit_sqe() can become more readable.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/io_uring.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)
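
For context on the behavior being factored out: linking is driven from
userspace by setting IOSQE_IO_LINK on an SQE, which defers the following
SQE until the flagged one completes. Below is a minimal liburing sketch
of a two-request link; the fds, buffer, and helper name are hypothetical
and not part of this patch.

/*
 * Minimal userspace sketch of SQE linking with liburing; the fds,
 * buffer, and helper name are illustrative only. Build with -luring.
 */
#include <liburing.h>

static int submit_linked_pair(int src_fd, int dst_fd, void *buf, unsigned len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* First SQE: read; IOSQE_IO_LINK chains the next SQE behind it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, src_fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* Second SQE: write; issued only after the linked read completes */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, dst_fd, buf, len, 0);

	ret = io_uring_submit(&ring);
	io_uring_queue_exit(&ring);
	return ret;
}

On the submission side, both SQEs flow through io_submit_sqe(). With this
patch, the first one makes io_link_sqe() establish link->head and return
NULL; the second, carrying no link flag, flushes the link and returns the
head for queueing. If the read fails or completes short, the rest of the
chain is typically canceled with -ECANCELED.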