[01/10] io_uring: allocate ctx id and build map between id and ctx

Message ID: 20230918041106.2134250-2-ming.lei@redhat.com
Series: io_uring/ublk: exit notifier support

Commit Message

Ming Lei Sept. 18, 2023, 4:10 a.m. UTC
Prepare for notifying uring_cmd drivers when a ctx/io_uring_task is going
away.

A notifier callback will be registered by the driver, so that the driver
can cancel in-flight commands which may depend on the io task.
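
For illustration, a driver-side callback could look like the sketch below.
This is a hedged sketch only: the callback signature and the
ublk_cancel_cmds_matching() helper are hypothetical, and the actual
registration interface is introduced by later patches in this series.

	/* hypothetical sketch: callback invoked when a ctx is going away */
	static void ublk_ctx_exit_notify(unsigned int ctx_id, void *data)
	{
		struct ublk_device *ub = data;

		/*
		 * Cancel in-flight uring_cmds issued from the exiting ctx;
		 * ublk_cancel_cmds_matching() is a made-up helper for
		 * illustration only.
		 */
		ublk_cancel_cmds_matching(ub, ctx_id);
	}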

To let the driver check whether a ctx matches a given uring_cmd, allocate a
ctx id and provide it to the callback, so we avoid exposing the whole ctx
instance.
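
The driver-side check is then a plain integer comparison. A minimal sketch,
assuming a later patch adds an accessor (here called io_uring_cmd_ctx_id())
that returns the id of the ctx a uring_cmd was issued against:

	/* io_uring_cmd_ctx_id() is a hypothetical accessor */
	static void ublk_cancel_one(struct io_uring_cmd *ioucmd,
				    unsigned int exiting_ctx_id)
	{
		if (io_uring_cmd_ctx_id(ioucmd) == exiting_ctx_id)
			io_uring_cmd_done(ioucmd, -ENODEV, 0,
					  IO_URING_F_UNLOCKED);
	}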

A global xarray, ctx_ids, is added for holding the mapping and allocating a
unique id for each ctx.
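
This follows the standard allocating-xarray idiom; a condensed view of the
lifecycle implemented by the hunks below:

	static DEFINE_XARRAY_FLAGS(ctx_ids, XA_FLAGS_ALLOC);

	/* ctx creation: allocate a unique id in [0, INT_MAX] for @ctx */
	if (xa_alloc(&ctx_ids, &ctx->id, ctx, xa_limit_31b, GFP_KERNEL))
		goto err;

	/* ctx teardown (and the alloc error path): drop the mapping */
	xa_erase(&ctx_ids, ctx->id);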

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 include/linux/io_uring.h       | 2 ++
 include/linux/io_uring_types.h | 3 +++
 io_uring/io_uring.c            | 9 +++++++++
 3 files changed, 14 insertions(+)

Patch

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 106cdc55ff3b..ec9714e36477 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -41,6 +41,8 @@  static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
 	return sqe->cmd;
 }
 
+#define IO_URING_INVALID_CTX_ID  UINT_MAX
+
 #if defined(CONFIG_IO_URING)
 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 			      struct iov_iter *iter, void *ioucmd);
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 13d19b9be9f4..d310bb073101 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -215,6 +215,9 @@  struct io_ring_ctx {
 		struct percpu_ref	refs;
 
 		enum task_work_notify_mode	notify_method;
+
+		/* for uring cmd driver to retrieve context  */
+		unsigned int		id;
 	} ____cacheline_aligned_in_smp;
 
 	/* submission data */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 783ed0fff71b..c015c070ff85 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -175,6 +175,9 @@  static struct ctl_table kernel_io_uring_disabled_table[] = {
 };
 #endif
 
+/* mapping between io_ring_ctx instance and its ctx_id */
+static DEFINE_XARRAY_FLAGS(ctx_ids, XA_FLAGS_ALLOC);
+
 struct sock *io_uring_get_socket(struct file *file)
 {
 #if defined(CONFIG_UNIX)
@@ -303,6 +306,10 @@  static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 
 	xa_init(&ctx->io_bl_xa);
 
+	ctx->id = IO_URING_INVALID_CTX_ID;
+	if (xa_alloc(&ctx_ids, &ctx->id, ctx, xa_limit_31b, GFP_KERNEL))
+		goto err;
+
 	/*
 	 * Use 5 bits less than the max cq entries, that should give us around
 	 * 32 entries per hash list if totally full and uniformly spread, but
@@ -356,6 +363,7 @@  static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
+	xa_erase(&ctx_ids, ctx->id);
 	kfree(ctx);
 	return NULL;
 }
@@ -2929,6 +2937,7 @@  static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
+	xa_erase(&ctx_ids, ctx->id);
 	kfree(ctx);
 }