@@ -2538,30 +2538,23 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-			  struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s, struct io_submit_state *state,
+			  struct io_kiocb **link)
 {
 	struct io_uring_sqe *sqe_copy;
-	struct io_kiocb *req;
 	int ret;
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
 		ret = -EINVAL;
-		goto err;
-	}
-
-	req = io_get_req(ctx, state);
-	if (unlikely(!req)) {
-		ret = -EAGAIN;
-		goto err;
+		goto err_req;
 	}
 
 	ret = io_req_set_file(ctx, s, state, req);
 	if (unlikely(ret)) {
err_req:
 		io_free_req(req, NULL);
-err:
 		io_cqring_add_event(ctx, s->sqe->user_data, ret);
 		return;
 	}
@@ -2697,9 +2690,15 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 
 	for (i = 0; i < nr; i++) {
 		struct sqe_submit s;
+		struct io_kiocb *req;
 
-		if (!io_get_sqring(ctx, &s))
+		req = io_get_req(ctx, statep);
+		if (unlikely(!req))
 			break;
+		if (!io_get_sqring(ctx, &s)) {
+			__io_free_req(req);
+			break;
+		}
 
 		if (io_sqe_needs_user(s.sqe) && !*mm) {
 			mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
@@ -2727,7 +2726,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		s.in_async = async;
 		s.needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
-		io_submit_sqe(ctx, &s, statep, &link);
+		io_submit_sqe(ctx, req, &s, statep, &link);
 		submitted++;
 
 		/*
Preparation patch. Make io_submit_sqes() allocate the io_kiocb and
then pass it further. Another difference is that the request is now
allocated before we get an sqe.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
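
For reference, here is a minimal, self-contained userspace sketch of the
resulting loop ordering. The *_stub types and helpers below are hypothetical
stand-ins rather than the real io_uring internals; the point is only the new
order of operations: get a request first, fetch an sqe second, and free the
request on the no-sqe path.

/*
 * Sketch of the post-patch submission loop. The *_stub helpers are
 * hypothetical stand-ins for io_get_req(), io_get_sqring() and
 * __io_free_req(); they are not the kernel's actual functions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sqe_submit { unsigned long long user_data; };
struct io_kiocb { int used; };

/* Stand-in for io_get_req(): allocate a request up front. */
static struct io_kiocb *io_get_req_stub(void)
{
	return malloc(sizeof(struct io_kiocb));
}

/* Stand-in for io_get_sqring(): pull the next sqe, if any remain. */
static bool io_get_sqring_stub(struct sqe_submit *s, unsigned int *left)
{
	if (!*left)
		return false;
	s->user_data = (*left)--;
	return true;
}

/* Stand-in for __io_free_req(). */
static void io_free_req_stub(struct io_kiocb *req)
{
	free(req);
}

int main(void)
{
	unsigned int nr = 4, sqes_left = 2, submitted = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct sqe_submit s;
		struct io_kiocb *req;

		/* Allocate first; stop if requests are exhausted. */
		req = io_get_req_stub();
		if (!req)
			break;
		/* Only then fetch an sqe; free the request if none is left. */
		if (!io_get_sqring_stub(&s, &sqes_left)) {
			io_free_req_stub(req);
			break;
		}
		/* Here io_submit_sqe(ctx, req, &s, ...) would consume req. */
		printf("submitted sqe, user_data=%llu\n", s.user_data);
		io_free_req_stub(req);	/* the sketch just frees it again */
		submitted++;
	}
	printf("submitted %u of %u\n", submitted, nr);
	return 0;
}

This ordering is what lets the first hunk drop the old err: label in
io_submit_sqe(): the request is guaranteed to exist on entry, so every early
failure can take the err_req path and free it.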