We'll be allocating lots of io_mapped_ubuf shortly, so add a cache
for recycling them.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/linux/io_uring_types.h |  2 ++
 io_uring/io_uring.c            |  9 +++++++++
 io_uring/rsrc.c                | 37 +++++++++++++++++++++++++++++--
 io_uring/rsrc.h                |  8 +++++++-
 4 files changed, 53 insertions(+), 3 deletions(-)

--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -336,6 +336,8 @@ struct io_ring_ctx {
struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce;
+ struct io_alloc_cache reg_buf_cache;
+
struct list_head io_buffers_pages;
#if defined(CONFIG_UNIX)
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -312,6 +312,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->io_buffers_cache);
io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
sizeof(struct io_rsrc_node));
+ io_alloc_cache_init(&ctx->reg_buf_cache, IO_NODE_ALLOC_CACHE_MAX,
+ sizeof(struct io_mapped_ubuf) + IO_BUF_CACHE_MAX_BVECS * sizeof(struct bio_vec));
io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct async_poll));
io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
@@ -2827,6 +2829,11 @@ static void io_rsrc_node_cache_free(struct io_cache_entry *entry)
kfree(container_of(entry, struct io_rsrc_node, cache));
}
+static void io_reg_buf_cache_free(struct io_cache_entry *entry)
+{
+ kvfree(container_of(entry, struct io_mapped_ubuf, cache));
+}
+
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_sq_thread_finish(ctx);
@@ -2865,6 +2872,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
+ io_alloc_cache_free(&ctx->reg_buf_cache, io_reg_buf_cache_free);
+
if (ctx->mm_account) {
mmdrop(ctx->mm_account);
ctx->mm_account = NULL;
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -78,6 +78,39 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
return 0;
}
+static void io_put_reg_buf(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
+{
+ lockdep_assert_held(&ctx->uring_lock);
+
+ if ((imu->max_bvecs != IO_BUF_CACHE_MAX_BVECS) ||
+ !io_alloc_cache_put(&ctx->reg_buf_cache, &imu->cache))
+ kvfree(imu);
+}
+
+static struct io_mapped_ubuf *io_alloc_reg_buf(struct io_ring_ctx *ctx,
+ int nr_bvecs)
+{
+ struct io_cache_entry *entry;
+ struct io_mapped_ubuf *imu;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ if (nr_bvecs > IO_BUF_CACHE_MAX_BVECS) {
+do_alloc:
+ imu = kvmalloc(struct_size(imu, bvec, nr_bvecs), GFP_KERNEL);
+ if (!imu)
+ return NULL;
+ } else {
+ nr_bvecs = IO_BUF_CACHE_MAX_BVECS;
+ entry = io_alloc_cache_get(&ctx->reg_buf_cache);
+ if (!entry)
+ goto do_alloc;
+ imu = container_of(entry, struct io_mapped_ubuf, cache);
+ }
+ imu->max_bvecs = nr_bvecs;
+ return imu;
+}
+
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
void __user *arg, unsigned index)
{
@@ -137,7 +170,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
unpin_user_page(imu->bvec[i].bv_page);
if (imu->acct_pages)
io_unaccount_mem(ctx, imu->acct_pages);
- kvfree(imu);
+ io_put_reg_buf(ctx, imu);
}
*slot = NULL;
}
@@ -1134,7 +1167,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
}
}
- imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
+ imu = io_alloc_reg_buf(ctx, nr_pages);
if (!imu)
goto done;
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -50,9 +50,15 @@ struct io_rsrc_node {
};
+#define IO_BUF_CACHE_MAX_BVECS 64
+
struct io_mapped_ubuf {
- u64 ubuf;
+ union {
+ struct io_cache_entry cache;
+ u64 ubuf;
+ };
u64 ubuf_end;
unsigned int nr_bvecs;
+ unsigned int max_bvecs;
unsigned long acct_pages;
struct bio_vec bvec[];
};
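
A side note on the rsrc.h hunk above: the free-list link is placed in a
union with ubuf, a field that only carries meaning while the buffer is
registered, so linking an idle object into the cache costs no extra bytes
per object. A minimal standalone illustration of that overlay (plain C
with made-up type names, not the kernel definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's io_cache_entry: just a free-list link */
struct cache_entry {
	struct cache_entry *next;
};

struct mapped_buf {
	union {
		struct cache_entry cache;	/* meaningful only while cached */
		uint64_t ubuf;			/* meaningful only while registered */
	};
	uint64_t ubuf_end;
	unsigned int nr_bvecs;
	unsigned int max_bvecs;
};

int main(void)
{
	/* both union members start at offset 0; the link adds no size */
	printf("cache at %zu, ubuf at %zu, total %zu bytes\n",
	       offsetof(struct mapped_buf, cache),
	       offsetof(struct mapped_buf, ubuf),
	       sizeof(struct mapped_buf));
	return 0;
}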
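For intuition about the recycling policy itself, here is a compilable
userspace sketch: requests for up to IO_BUF_CACHE_MAX_BVECS bvecs are all
rounded up to that one size, so every cached object can satisfy any small
registration, while larger requests bypass the cache completely. Everything
below (mapped_buf, buf_alloc, buf_free, the array free list) is an
illustrative model, not the kernel's io_alloc_cache API:

#include <stdio.h>
#include <stdlib.h>

#define BUF_CACHE_MAX_BVECS	64	/* single cached size, like IO_BUF_CACHE_MAX_BVECS */
#define BUF_CACHE_MAX_NODES	32	/* cache capacity, like IO_NODE_ALLOC_CACHE_MAX */

struct bvec {
	void *page;
	unsigned int len;
	unsigned int offset;
};

struct mapped_buf {
	unsigned int nr_bvecs;		/* bvecs actually in use */
	unsigned int max_bvecs;		/* bvecs this allocation can hold */
	struct bvec bvec[];		/* flexible array, as in io_mapped_ubuf */
};

static struct mapped_buf *cache[BUF_CACHE_MAX_NODES];
static unsigned int cached;

static struct mapped_buf *buf_alloc(unsigned int nr_bvecs)
{
	struct mapped_buf *buf = NULL;

	if (nr_bvecs <= BUF_CACHE_MAX_BVECS) {
		/* round small requests up to the one cached size */
		nr_bvecs = BUF_CACHE_MAX_BVECS;
		if (cached)
			buf = cache[--cached];
	}
	if (!buf)
		buf = malloc(sizeof(*buf) + nr_bvecs * sizeof(struct bvec));
	if (buf)
		buf->max_bvecs = nr_bvecs;
	return buf;
}

static void buf_free(struct mapped_buf *buf)
{
	/* only exactly cache-sized objects are recycled */
	if (buf->max_bvecs == BUF_CACHE_MAX_BVECS && cached < BUF_CACHE_MAX_NODES) {
		cache[cached++] = buf;
		return;
	}
	free(buf);
}

int main(void)
{
	struct mapped_buf *a = buf_alloc(4);	/* served from the 64-bvec bucket */
	buf_free(a);				/* parked in the cache, not freed */
	struct mapped_buf *b = buf_alloc(9);	/* reuses the same object */
	printf("recycled: %s\n", a == b ? "yes" : "no");
	buf_free(b);

	struct mapped_buf *big = buf_alloc(1000); /* over the limit: plain malloc */
	buf_free(big);				  /* ... and plain free */
	return 0;
}

Rounding small requests up to a single bucket size wastes a little memory
per object, but it makes every cached object interchangeable, which is what
keeps the hot register/unregister path allocation-free.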