--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -174,7 +174,9 @@ struct io_submit_state {
bool plug_started;
bool need_plug;
unsigned short submit_nr;
+ unsigned int cqes_count;
struct blk_plug plug;
+ struct io_uring_cqe cqes[16];
};
struct io_ev_fd {
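
[Note: the two new fields above form a fixed 16-entry completion cache that is
drained alongside compl_reqs. The following is a standalone userspace model of
that cache-and-flush pattern; the names cqes/cqes_count mirror the patch, but
everything else is an illustrative sketch, not the kernel implementation.]

/*
 * Standalone model of the CQE cache added above.
 * Build: cc -o model model.c && ./model
 */
#include <stdio.h>

struct cqe {                         /* stands in for struct io_uring_cqe */
	unsigned long long user_data;
	int res;
	unsigned int flags;
};

#define CQE_BATCH 16                 /* matches cqes[16] in the patch */

struct submit_state {
	unsigned int cqes_count;
	struct cqe cqes[CQE_BATCH];
};

static void post_to_ring(const struct cqe *c)   /* stand-in for io_get_cqe() + copy */
{
	printf("post user_data=%llu res=%d\n", c->user_data, c->res);
}

static void flush_post_cqes(struct submit_state *s)  /* cf. __io_flush_post_cqes() */
{
	for (unsigned int i = 0; i < s->cqes_count; i++)
		post_to_ring(&s->cqes[i]);
	s->cqes_count = 0;
}

/* Cache one result; drain first if the batch is full. */
static void post_cached(struct submit_state *s, unsigned long long ud, int res)
{
	struct cqe *c;

	if (s->cqes_count == CQE_BATCH)
		flush_post_cqes(s);
	c = &s->cqes[s->cqes_count++];
	c->user_data = ud;
	c->res = res;
	c->flags = 0;
}

/* cf. __io_submit_flush_completions(): cached CQEs are posted before the
 * batched compl_reqs so userspace never sees reordered completions. */
static void submit_flush_completions(struct submit_state *s,
				     const struct cqe *reqs, unsigned int nreqs)
{
	flush_post_cqes(s);                       /* post must come first */
	for (unsigned int i = 0; i < nreqs; i++)
		post_to_ring(&reqs[i]);
}

int main(void)
{
	struct submit_state s = { 0 };
	struct cqe done = { .user_data = 100, .res = 0, .flags = 0 };

	for (unsigned long long i = 0; i < 20; i++)  /* 20 posts: one mid-way drain */
		post_cached(&s, i, 16);
	submit_flush_completions(&s, &done, 1);      /* cache drains before 'done' */
	return 0;
}
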
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -167,7 +167,8 @@ EXPORT_SYMBOL(io_uring_get_socket);
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
- if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
+ ctx->submit_state.cqes_count)
__io_submit_flush_completions(ctx);
}
@@ -807,6 +808,43 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
+static bool __io_fill_cqe_small(struct io_ring_ctx *ctx,
+ struct io_uring_cqe *cqe)
+{
+ struct io_uring_cqe *cqe_out;
+
+ cqe_out = io_get_cqe(ctx);
+ if (unlikely(!cqe_out)) {
+ return io_cqring_event_overflow(ctx, cqe->user_data,
+ cqe->res, cqe->flags,
+ 0, 0);
+ }
+
+ trace_io_uring_complete(ctx, NULL, cqe->user_data,
+ cqe->res, cqe->flags,
+ 0, 0);
+
+ memcpy(cqe_out, cqe, sizeof(*cqe_out));
+
+ if (ctx->flags & IORING_SETUP_CQE32) {
+ WRITE_ONCE(cqe_out->big_cqe[0], 0);
+ WRITE_ONCE(cqe_out->big_cqe[1], 0);
+ }
+ return true;
+}
+
+static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_submit_state *state = &ctx->submit_state;
+ unsigned int i;
+
+ lockdep_assert_held(&ctx->uring_lock);
+ for (i = 0; i < state->cqes_count; i++)
+ __io_fill_cqe_small(ctx, state->cqes + i);
+ state->cqes_count = 0;
+}
+
bool io_post_aux_cqe(struct io_ring_ctx *ctx,
u64 user_data, s32 res, u32 cflags)
{
@@ -1352,6 +1390,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
struct io_submit_state *state = &ctx->submit_state;
io_cq_lock(ctx);
+ /* post must come first to preserve CQE ordering */
+ if (state->cqes_count)
+ __io_flush_post_cqes(ctx);
wq_list_for_each(node, prev, &state->compl_reqs) {
struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
@@ -1361,8 +1402,10 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
}
__io_cq_unlock_post(ctx);
- io_free_batch_list(ctx, state->compl_reqs.first);
- INIT_WQ_LIST(&state->compl_reqs);
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
+ io_free_batch_list(ctx, state->compl_reqs.first);
+ INIT_WQ_LIST(&state->compl_reqs);
+ }
}
Multishot ops cannot use the compl_reqs list as the request must stay in
the poll list, but that means they need to run each completion without
benefiting from batching.

Here, introduce batching infrastructure for only small (i.e. 16 byte)
CQEs. This restriction is fine because there are no use cases posting 32
byte CQEs.

In the ring, keep a batch of up to 16 posted results and flush them in
the same way as compl_reqs. 16 was chosen through experimentation on a
microbenchmark ([1]), as well as to avoid growing the ring too much.
This increases the size of the ring to 1472 bytes from 1216 (16 cached
CQEs * 16 bytes = 256 bytes).

[1]: https://github.com/DylanZA/liburing/commit/9ac66b36bcf4477bfafeff1c5f107896b7ae31cf

Run with:

  $ make -j && ./benchmark/reg.b -s 1 -t 2000 -r 10

Gives results (batch size vs throughput):

  baseline  8309 k/s
  8        18807 k/s
  16       19338 k/s
  32       20134 k/s

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 include/linux/io_uring_types.h |  2 ++
 io_uring/io_uring.c            | 49 +++++++++++++++++++++++++++++++---
 2 files changed, 48 insertions(+), 3 deletions(-)
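
[Usage-side note, not part of this patch: batched kernel completions pair
naturally with batched reaping in userspace. Below is a hedged liburing
sketch, assuming liburing >= 2.2 and a kernel with multishot poll support,
that arms a multishot poll and drains whatever CQEs are ready in one pass.]

/*
 * Companion sketch: reap multishot completions in batches with liburing.
 * Error handling trimmed for brevity. Build: cc demo.c -luring
 */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *first, *cqes[16];
	struct io_uring_sqe *sqe;
	int fds[2];
	unsigned n;

	if (pipe(fds) || io_uring_queue_init(32, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, fds[0], POLLIN);
	io_uring_submit(&ring);

	write(fds[1], "x", 1);                  /* make the poll fire */

	io_uring_wait_cqe(&ring, &first);       /* block for at least one CQE */
	n = io_uring_peek_batch_cqe(&ring, cqes, 16);  /* grab all ready CQEs */
	for (unsigned i = 0; i < n; i++)
		printf("res=%d flags=0x%x\n", cqes[i]->res, cqes[i]->flags);
	io_uring_cq_advance(&ring, n);          /* consume the whole batch */

	io_uring_queue_exit(&ring);
	return 0;
}
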