--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -158,6 +158,10 @@ struct io_ev_fd {
 	struct rcu_head		rcu;
 };
 
+struct io_alloc_cache {
+	struct hlist_head	list;
+};
+
 struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
@@ -216,7 +220,7 @@ struct io_ring_ctx {
 		struct io_hash_table	cancel_table_locked;
 		struct list_head	cq_overflow_list;
-		struct list_head	apoll_cache;
+		struct io_alloc_cache	apoll_cache;
 		struct xarray		personalities;
 		u32			pers_next;
 	} ____cacheline_aligned_in_smp;
new file mode 100644
--- /dev/null
+++ b/io_uring/alloc_cache.h
@@ -0,0 +1,4 @@
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+{
+	INIT_HLIST_HEAD(&cache->list);
+}
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -92,6 +92,7 @@
 #include "timeout.h"
 #include "poll.h"
+#include "alloc_cache.h"
 
 #define IORING_MAX_ENTRIES	32768
 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
@@ -295,7 +296,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
-	INIT_LIST_HEAD(&ctx->apoll_cache);
+	io_alloc_cache_init(&ctx->apoll_cache);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
@@ -1180,8 +1181,8 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 				if (apoll->double_poll)
 					kfree(apoll->double_poll);
-				list_add(&apoll->poll.wait.entry,
-						&ctx->apoll_cache);
+				hlist_add_head(&apoll->cache_list,
+						&ctx->apoll_cache.list);
 				req->flags &= ~REQ_F_POLLED;
 			}
 			if (req->flags & IO_REQ_LINK_FLAGS)
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -589,10 +589,10 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   !list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-						poll.wait.entry);
-		list_del_init(&apoll->poll.wait.entry);
+		   !hlist_empty(&ctx->apoll_cache.list)) {
+		apoll = hlist_entry(ctx->apoll_cache.list.first,
+				struct async_poll, cache_list);
+		hlist_del(&apoll->cache_list);
 	} else {
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
@@ -963,10 +963,10 @@ void io_flush_apoll_cache(struct io_ring_ctx *ctx)
 {
 	struct async_poll *apoll;
 
-	while (!list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-						poll.wait.entry);
-		list_del(&apoll->poll.wait.entry);
+	while (!hlist_empty(&ctx->apoll_cache.list)) {
+		apoll = hlist_entry(ctx->apoll_cache.list.first,
+				struct async_poll, cache_list);
+		hlist_del(&apoll->cache_list);
 		kfree(apoll);
 	}
 }
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -14,7 +14,10 @@ struct io_poll {
 };
 
 struct async_poll {
-	struct io_poll		poll;
+	union {
+		struct io_poll		poll;
+		struct hlist_node	cache_list;
+	};
 	struct io_poll		*double_poll;
 };

In preparation for adding limits, and one more user, abstract out the
core bits of the allocation+free cache.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h |  6 +++++-
 io_uring/alloc_cache.h         |  4 ++++
 io_uring/io_uring.c            |  7 ++++---
 io_uring/poll.c                | 16 ++++++++--------
 io_uring/poll.h                |  5 ++++-
 5 files changed, 25 insertions(+), 13 deletions(-)
 create mode 100644 io_uring/alloc_cache.h
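
Editor's note, not part of the patch: the message above says this is preparation for adding limits and one more user of the cache, so generic get/put helpers on top of the hlist in struct io_alloc_cache are presumably the next step. The sketch below is only an illustration of what such helpers could look like given that layout; struct io_cache_entry, io_alloc_cache_get() and io_alloc_cache_put() are hypothetical names for this example and are not introduced by this patch.

/*
 * Illustrative sketch only, not part of this patch. Builds on the
 * hlist-based struct io_alloc_cache added above (io_uring_types.h);
 * the entry type and helper names are made up for the example.
 */
#include <linux/list.h>

struct io_cache_entry {
	struct hlist_node	node;
};

/*
 * Stash an entry in the cache. The caller serializes access; the apoll
 * cache above is only touched with the ring locked (!IO_URING_F_UNLOCKED).
 */
static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
				      struct io_cache_entry *entry)
{
	hlist_add_head(&entry->node, &cache->list);
}

/*
 * Pop a cached entry, or return NULL so the caller falls back to kmalloc().
 */
static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	struct hlist_node *node = cache->list.first;

	if (!node)
		return NULL;
	hlist_del(node);
	return container_of(node, struct io_cache_entry, node);
}

With helpers along these lines, the open-coded hlist handling in io_req_alloc_apoll() and io_flush_apoll_cache() collapses into single calls, and a per-cache element count for the limit mentioned above would only need to be maintained in one place.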