
[for-next,3/4] io_uring: remove async request cache

Message ID: 7bffccd213e370abd4de480e739d8b08ab6c1326.1712331455.git.asml.silence@gmail.com
State: New
Series: clean up io_req_complete_post

Commit Message

Pavel Begunkov April 5, 2024, 3:50 p.m. UTC
io_req_complete_post() was the sole user of ->locked_free_list, but
since we just gutted the function, the cache is no longer used and can
be removed.

->locked_free_list served as an asynchronous counterpart of the main
request (i.e. struct io_kiocb) cache for all unlocked cases like io-wq.
Now such requests are all forced to be completed directly into the main
cache, either from the normal completion path or via io_free_req().
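
For readers without the surrounding code at hand, the scheme being
simplified is easiest to see as two caches: the main per-ring free list
used under ->uring_lock, and the ->locked_free_list side cache used by
contexts that don't hold that lock, which was spliced back into the main
cache in batches. The following is a small self-contained user-space
model of the pre-patch behaviour; every name in it (toy_req,
req_cache_add_locked, flush_side_cache, BATCH, ...) is invented for
illustration and only loosely corresponds to the kernel structures.

/*
 * Toy model of the two request caches; build with: cc -pthread toy.c
 * free_list loosely plays the role of ctx->submit_state.free_list,
 * side_list the role of the ->locked_free_list removed by this patch.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 32			/* stand-in for IO_COMPL_BATCH */

struct toy_req { struct toy_req *next; };

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_req *free_list;	/* main cache, under uring_lock */

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_req *side_list;	/* models ->locked_free_list */
static unsigned int side_nr;

/* Normal completion path: uring_lock already held, recycle directly. */
static void req_cache_add(struct toy_req *req)
{
	req->next = free_list;
	free_list = req;
}

/*
 * Pre-patch unlocked path (e.g. an io-wq worker): park the request in
 * the completion_lock-protected side cache instead of taking uring_lock.
 */
static void req_cache_add_locked(struct toy_req *req)
{
	pthread_mutex_lock(&completion_lock);
	req->next = side_list;
	side_list = req;
	side_nr++;
	pthread_mutex_unlock(&completion_lock);
}

/*
 * Pre-patch refill step: once the side cache grows past a batch, splice
 * it into the main cache (cf. the removed io_flush_cached_locked_reqs()).
 * The caller holds uring_lock.
 */
static void flush_side_cache(void)
{
	pthread_mutex_lock(&completion_lock);
	while (side_list) {
		struct toy_req *req = side_list;

		side_list = req->next;
		req_cache_add(req);
	}
	side_nr = 0;
	pthread_mutex_unlock(&completion_lock);
}

int main(void)
{
	/* An "io-wq worker" retires a batch's worth of requests the old way. */
	for (int i = 0; i < BATCH + 1; i++)
		req_cache_add_locked(calloc(1, sizeof(struct toy_req)));

	pthread_mutex_lock(&uring_lock);
	if (side_nr > BATCH)	/* the check the old __io_alloc_req_refill() did */
		flush_side_cache();
	pthread_mutex_unlock(&uring_lock);

	printf("main cache populated: %s\n", free_list ? "yes" : "no");
	return 0;
}

After this patch only the first helper has a kernel counterpart: unlocked
completers no longer get a side cache and instead arrange for the request
to be recycled under ->uring_lock, via the normal completion path or
io_free_req().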

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/linux/io_uring_types.h |  4 ----
 io_uring/io_uring.c            | 22 ----------------------
 2 files changed, 26 deletions(-)

Comments

Ming Lei April 6, 2024, 1:26 p.m. UTC | #1
On Fri, Apr 05, 2024 at 04:50:04PM +0100, Pavel Begunkov wrote:
> io_req_complete_post() was the sole user of ->locked_free_list, but
> since we just gutted the function, the cache is no longer used and can
> be removed.
> 
> ->locked_free_list served as an asynchronous counterpart of the main
> request (i.e. struct io_kiocb) cache for all unlocked cases like io-wq.
> Now such requests are all forced to be completed directly into the main
> cache, either from the normal completion path or via io_free_req().
> 
> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

Reviewed-by: Ming Lei <ming.lei@redhat.com>

Thanks,
Ming
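
As for the io_free_req() route the commit message mentions: it does not
touch any free list itself but bounces the request to task-work context,
where it is completed (without posting a CQE) under the usual locking and
then falls back into the main cache. The snippet below is paraphrased
from memory of the mainline helper around this kernel version, not taken
from this series, so treat it as a sketch rather than the exact code.

/* Sketch of io_free_req(), paraphrased; details may differ in this tree. */
__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free the request, don't post a CQE for it */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

io_req_task_complete() then runs in task context and recycles the request
through the normal completion path, i.e. back into the main
ctx->submit_state.free_list cache.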

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index b191710bec4f..9c49aa2dac38 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -347,10 +347,6 @@  struct io_ring_ctx {
 
 	spinlock_t		completion_lock;
 
-	/* IRQ completion list, under ->completion_lock */
-	unsigned int		locked_free_nr;
-	struct io_wq_work_list	locked_free_list;
-
 	struct list_head	io_buffers_comp;
 	struct list_head	cq_overflow_list;
 	struct io_hash_table	cancel_table;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c84650b0f7f2..b20ee6a0e32e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -335,7 +335,6 @@  static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_llist_head(&ctx->work_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	ctx->submit_state.free_list.next = NULL;
-	INIT_WQ_LIST(&ctx->locked_free_list);
 	INIT_HLIST_HEAD(&ctx->waitid_list);
 #ifdef CONFIG_FUTEX
 	INIT_HLIST_HEAD(&ctx->futex_list);
@@ -990,15 +989,6 @@  static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 }
 
-static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-					struct io_submit_state *state)
-{
-	spin_lock(&ctx->completion_lock);
-	wq_list_splice(&ctx->locked_free_list, &state->free_list);
-	ctx->locked_free_nr = 0;
-	spin_unlock(&ctx->completion_lock);
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -1012,17 +1002,6 @@  __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret;
 
-	/*
-	 * If we have more than a batch's worth of requests in our IRQ side
-	 * locked cache, grab the lock and move them over to our submission
-	 * side cache.
-	 */
-	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
-		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-		if (!io_req_cache_empty(ctx))
-			return true;
-	}
-
 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
@@ -2741,7 +2720,6 @@  static void io_req_caches_free(struct io_ring_ctx *ctx)
 	int nr = 0;
 
 	mutex_lock(&ctx->uring_lock);
-	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
 
 	while (!io_req_cache_empty(ctx)) {
 		req = io_extract_req(ctx);