diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -868,7 +868,7 @@ inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
io_req_complete_post(req);
}
-void io_req_complete_failed(struct io_kiocb *req, s32 res)
+static inline void io_req_prep_failed(struct io_kiocb *req, s32 res)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -876,6 +876,18 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res)
io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
if (def->fail)
def->fail(req);
+}
+
+static void io_req_defer_failed(struct io_kiocb *req, s32 res)
+ __must_hold(&ctx->uring_lock)
+{
+ io_req_prep_failed(req, res);
+ io_req_complete_defer(req);
+}
+
+void io_req_post_failed(struct io_kiocb *req, s32 res)
+{
+ io_req_prep_failed(req, res);
io_req_complete_post(req);
}
@@ -1249,7 +1261,7 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
{
/* not needed for normal modes, but SQPOLL depends on it */
io_tw_lock(req->ctx, locked);
- io_req_complete_failed(req, req->cqe.res);
+ io_req_defer_failed(req, req->cqe.res);
}
void io_req_task_submit(struct io_kiocb *req, bool *locked)
@@ -1259,7 +1271,7 @@ void io_req_task_submit(struct io_kiocb *req, bool *locked)
if (likely(!(req->task->flags & PF_EXITING)))
io_queue_sqe(req);
else
- io_req_complete_failed(req, -EFAULT);
+ io_req_defer_failed(req, -EFAULT);
}
void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -1637,7 +1649,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
ret = io_req_prep_async(req);
if (ret) {
fail:
- io_req_complete_failed(req, ret);
+ io_req_defer_failed(req, ret);
return;
}
io_prep_async_link(req);
@@ -1867,7 +1879,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
struct io_kiocb *linked_timeout;
if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
- io_req_complete_failed(req, ret);
+ io_req_defer_failed(req, ret);
return;
}
@@ -1917,14 +1929,14 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
*/
req->flags &= ~REQ_F_HARDLINK;
req->flags |= REQ_F_LINK;
- io_req_complete_failed(req, req->cqe.res);
+ io_req_defer_failed(req, req->cqe.res);
} else if (unlikely(req->ctx->drain_active)) {
io_drain_req(req);
} else {
int ret = io_req_prep_async(req);
if (unlikely(ret))
- io_req_complete_failed(req, ret);
+ io_req_defer_failed(req, ret);
else
io_queue_iowq(req, NULL);
}
@@ -2851,7 +2863,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
while (!list_empty(&list)) {
de = list_first_entry(&list, struct io_defer_entry, list);
list_del_init(&de->list);
- io_req_complete_failed(de->req, -ECANCELED);
+ io_req_post_failed(de->req, -ECANCELED);
kfree(de);
}
return true;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -29,7 +29,7 @@ bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
-void io_req_complete_failed(struct io_kiocb *req, s32 res);
+void io_req_post_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
diff --git a/io_uring/poll.c b/io_uring/poll.c
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -304,7 +304,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
else if (ret == IOU_POLL_DONE)
io_req_task_submit(req, locked);
else
- io_req_complete_failed(req, ret);
+ io_req_post_failed(req, ret);
}
static void __io_poll_execute(struct io_kiocb *req, int mask)
Different use cases might want to defer the failure completion when the
ring is locked, or post the completion immediately when the lock is not
definitely taken. Split the old io_req_complete_failed() accordingly:
io_req_defer_failed() requires the ctx->uring_lock and defers the
completion, while io_req_post_failed() posts it directly.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/io_uring.c | 28 ++++++++++++++++++++--------
 io_uring/io_uring.h |  2 +-
 io_uring/poll.c     |  2 +-
 3 files changed, 22 insertions(+), 10 deletions(-)
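
For reviewers, a minimal sketch of how a call site might choose between
the two helpers after this patch. The caller io_fail_req_example() and
its "locked" hint are hypothetical, added here only to illustrate the
intended split; the two io_req_*_failed() helpers are the ones added
above.

	/* Illustrative sketch only, not part of the patch. */
	static void io_fail_req_example(struct io_kiocb *req, s32 res,
					bool locked)
	{
		if (locked) {
			/*
			 * ctx->uring_lock is definitely held: queue the
			 * failed completion on the deferred list, which is
			 * flushed before the lock is dropped.
			 */
			io_req_defer_failed(req, res);
		} else {
			/*
			 * Lock state is uncertain: post the CQE
			 * immediately, which does not require holding
			 * ctx->uring_lock.
			 */
			io_req_post_failed(req, res);
		}
	}

This mirrors the call sites in the diff: task-work paths that run under
io_tw_lock() switch to io_req_defer_failed(), while io_cancel_defer_files()
and the apoll path in poll.c, where the lock is not guaranteed, use
io_req_post_failed().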