[1/2] io_uring/rsrc: keep one global dummy_ubuf

Message ID: 95c9dea5180d066dc35a94d39f4ce5a3ecdfbf77.1691546329.git.asml.silence@gmail.com
State: New
Series: random for-next cleanups

Commit Message

Pavel Begunkov Aug. 9, 2023, 12:25 p.m. UTC
We set empty registered buffers to dummy_ubuf as an optimisation.
Currently, we allocate the dummy entry for each ring, whereas we can
simply have one global instance.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  9 ---------
 io_uring/rsrc.c     | 14 ++++++++++----
 2 files changed, 10 insertions(+), 13 deletions(-)
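
The pattern being applied is a shared read-only sentinel: instead of kzalloc()ing a dummy entry per ring at setup time (and kfree()ing it again on teardown and in the setup error path), every empty buffer slot points at a single static instance. A minimal sketch of the idea outside of io_uring, with invented names standing in for the real structures:

#include <stdlib.h>

/* Hypothetical stand-in for struct io_mapped_ubuf. */
struct mapped_buf {
	unsigned long ubuf;	/* first byte of the mapped range */
	unsigned long ubuf_end;	/* one past the last byte */
};

/*
 * One read-only global sentinel instead of a per-context allocation.
 * ubuf > ubuf_end is an impossible range, so any range check against
 * the sentinel fails and the slot behaves as "empty".
 */
static const struct mapped_buf dummy_buf = {
	.ubuf = -1UL,
	.ubuf_end = 0,
};

struct ring_ctx {
	struct mapped_buf *bufs[4];
};

static void ring_ctx_init(struct ring_ctx *ctx)
{
	int i;

	/* No allocation and no error path: empty slots share the sentinel. */
	for (i = 0; i < 4; i++)
		ctx->bufs[i] = (struct mapped_buf *)&dummy_buf;
}

static void ring_ctx_free(struct ring_ctx *ctx)
{
	int i;

	/* Only real entries are freed; the sentinel is never touched. */
	for (i = 0; i < 4; i++)
		if (ctx->bufs[i] != &dummy_buf)
			free(ctx->bufs[i]);
}

int main(void)
{
	struct ring_ctx ctx;

	ring_ctx_init(&ctx);
	ring_ctx_free(&ctx);
	return 0;
}

The cast in ring_ctx_init() is needed precisely because the sentinel is const while the slots are not, which is the const-ness issue raised in the comments below.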

Comments

Pavel Begunkov Aug. 9, 2023, 3:05 p.m. UTC | #1
On 8/9/23 13:25, Pavel Begunkov wrote:
> We set empty registered buffers to dummy_ubuf as an optimisation.
> Currently, we allocate the dummy entry for each ring, whereas we can
> simply have one global instance.

And only now it started complaining about const-ness; I'll
resend it.
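
The complaint is presumably the compiler pointing out that &dummy_ubuf is a pointer to const while ctx->user_bufs[i] and *pimu are plain struct io_mapped_ubuf pointers, so the stores in the patch discard the qualifier. A minimal reproduction with a stand-in struct; the cast on the last line is one way a resend could keep the sentinel const (an illustration, not the actual v2):

/* Minimal stand-in; the real struct lives in io_uring/rsrc.h. */
struct io_mapped_ubuf {
	unsigned long ubuf;
	unsigned long ubuf_end;
};

static const struct io_mapped_ubuf dummy_ubuf = {
	.ubuf = -1UL,
	.ubuf_end = 0,
};

void store(struct io_mapped_ubuf **slot)
{
	/*
	 * gcc: assignment discards 'const' qualifier from pointer
	 * target type [-Wdiscarded-qualifiers]
	 */
	*slot = &dummy_ubuf;

	/* Comparisons against &dummy_ubuf are fine; only stores need a cast. */
	*slot = (struct io_mapped_ubuf *)&dummy_ubuf;
}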



Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fb70ae436db6..3c97401240c2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -307,13 +307,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 		goto err;
 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
 		goto err;
-
-	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
-	if (!ctx->dummy_ubuf)
-		goto err;
-	/* set invalid range, so io_import_fixed() fails meeting it */
-	ctx->dummy_ubuf->ubuf = -1UL;
-
 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
 			    0, GFP_KERNEL))
 		goto err;
@@ -352,7 +345,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
 	return ctx;
 err:
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);
@@ -2905,7 +2897,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 5e8fdd9b8ca6..92e2471283ba 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -33,6 +33,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)
 
+static const struct io_mapped_ubuf dummy_ubuf = {
+	/* set invalid range, so io_import_fixed() fails meeting it */
+	.ubuf = -1UL,
+	.ubuf_end = 0,
+};
+
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
@@ -132,7 +138,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
 	struct io_mapped_ubuf *imu = *slot;
 	unsigned int i;
 
-	if (imu != ctx->dummy_ubuf) {
+	if (imu != &dummy_ubuf) {
 		for (i = 0; i < imu->nr_bvecs; i++)
 			unpin_user_page(imu->bvec[i].bv_page);
 		if (imu->acct_pages)
@@ -459,14 +465,14 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 			break;
 
 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
-		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
+		if (ctx->user_bufs[i] != &dummy_ubuf) {
 			err = io_queue_rsrc_removal(ctx->buf_data, i,
 						    ctx->user_bufs[i]);
 			if (unlikely(err)) {
 				io_buffer_unmap(ctx, &imu);
 				break;
 			}
-			ctx->user_bufs[i] = ctx->dummy_ubuf;
+			ctx->user_bufs[i] = &dummy_ubuf;
 		}
 
 		ctx->user_bufs[i] = imu;
@@ -1077,7 +1083,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	int ret, nr_pages, i;
 	struct folio *folio = NULL;
 
-	*pimu = ctx->dummy_ubuf;
+	*pimu = &dummy_ubuf;
 	if (!iov->iov_base)
 		return 0;
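
For completeness, why the impossible range works: io_import_fixed() validates the requested [addr, addr + len) window against [imu->ubuf, imu->ubuf_end), and with ubuf = -1UL and ubuf_end = 0 that window is empty, so any import hitting the dummy entry fails with -EFAULT, exactly as the comment above the initializer promises. A simplified, self-contained sketch of that check (not the exact kernel code):

#include <errno.h>

/* Simplified sketch of the bounds check in io_import_fixed(). */
static int check_fixed_range(unsigned long buf_addr, unsigned long len,
			     unsigned long ubuf, unsigned long ubuf_end)
{
	unsigned long buf_end;

	if (__builtin_add_overflow(buf_addr, len, &buf_end))
		return -EFAULT;
	/*
	 * Not inside the mapped region. Against the dummy entry
	 * (ubuf == -1UL, ubuf_end == 0) this can never pass: any
	 * buf_addr below -1UL fails the first test, and -1UL itself
	 * either overflows above or ends past ubuf_end == 0.
	 */
	if (buf_addr < ubuf || buf_end > ubuf_end)
		return -EFAULT;
	return 0;
}

int main(void)
{
	/* Any request against the sentinel range is rejected (-EFAULT). */
	return check_fixed_range(0x1000, 4096, -1UL, 0) == -EFAULT ? 0 : 1;
}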