@@ -935,9 +935,11 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
}
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+bool io_aux_cqe(struct io_kiocb *req, bool defer, s32 res, u32 cflags,
bool allow_overflow)
{
+ struct io_ring_ctx *ctx = req->ctx;
+ u64 user_data = req->cqe.user_data;
struct io_uring_cqe *cqe;
unsigned int length;
@@ -963,7 +965,7 @@ bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32
return false;
cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
- cqe->user_data = user_data;
+ cqe->user_data = req->cqe.user_data;
cqe->res = res;
cqe->flags = cflags;
return true;
@@ -47,7 +47,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
+bool io_aux_cqe(struct io_kiocb *req, bool defer, s32 res, u32 cflags,
bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
@@ -632,8 +632,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
if (!mshot_finished) {
- if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ *ret, cflags | IORING_CQE_F_MORE, true)) {
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
@@ -1304,7 +1304,6 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1354,8 +1353,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0)
return ret;
- if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
+ if (io_aux_cqe(req, issue_flags & IO_URING_F_COMPLETE_DEFER, ret,
+ IORING_CQE_F_MORE, true))
goto retry;
return -ECANCELED;
@@ -300,8 +300,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- if (!io_aux_cqe(req->ctx, ts->locked, req->cqe.user_data,
- mask, IORING_CQE_F_MORE, false)) {
+ if (!io_aux_cqe(req, ts->locked, mask,
+ IORING_CQE_F_MORE, false)) {
io_req_set_res(req, mask, 0);
return IOU_POLL_REMOVE_POLL_USE_RES;
}
@@ -73,8 +73,8 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
if (!io_timeout_finish(timeout, data)) {
bool filled;
- filled = io_aux_cqe(ctx, ts->locked, req->cqe.user_data, -ETIME,
- IORING_CQE_F_MORE, false);
+ filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
+ false);
if (filled) {
/* re-arm timer */
spin_lock_irq(&ctx->timeout_lock);
Everybody is passing in the request, so get rid of the io_ring_ctx and explicit user_data pass-in. Both the ctx and user_data can be deduced from the request at hand. Signed-off-by: Jens Axboe <axboe@kernel.dk> ---