
[RFC,16/22] ublk: bpf: add bpf aio struct_ops

Message ID 20250107120417.1237392-17-tom.leiming@gmail.com (mailing list archive)
State RFC
Series ublk: support bpf

Checks

Context                 Check     Description
netdev/tree_selection   success   Not a local patch, async

Commit Message

Ming Lei Jan. 7, 2025, 12:04 p.m. UTC
Add a bpf aio struct_ops so that application code can provide the bpf
aio completion callback in a struct_ops prog, which is what enables bpf
aio support.
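
As a rough sketch of the application side (not taken from this series), a
struct_ops prog implementing this interface could look like the following;
the section names, the id value and the surrounding boilerplate are
illustrative assumptions:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* must be non-sleepable: check_member() rejects sleepable progs */
	SEC("struct_ops/ublk_bpf_aio_complete_cb")
	void BPF_PROG(ublk_bpf_aio_complete_cb, struct bpf_aio *io, long ret)
	{
		/* consume the completion of a bpf aio submitted via bpf_aio_submit() */
	}

	SEC(".struct_ops")
	struct bpf_aio_complete_ops ublk_bpf_aio_ops = {
		.id			= 0,	/* globally defined consumer id (illustrative) */
		.bpf_aio_complete_cb	= (void *)ublk_bpf_aio_complete_cb,
	};

	char LICENSE[] SEC("license") = "GPL";

Only the id field is copied by init_member(), so any other data member of
the map value has to stay zero-initialized or the verifier rejects the map.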

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 drivers/block/ublk/Makefile      |   2 +-
 drivers/block/ublk/bpf_aio.c     |   7 ++
 drivers/block/ublk/bpf_aio.h     |  12 +++
 drivers/block/ublk/bpf_aio_ops.c | 152 +++++++++++++++++++++++++++++++
 4 files changed, 172 insertions(+), 1 deletion(-)
 create mode 100644 drivers/block/ublk/bpf_aio_ops.c

Patch

diff --git a/drivers/block/ublk/Makefile b/drivers/block/ublk/Makefile
index 7094607c040d..a47f65eb97f8 100644
--- a/drivers/block/ublk/Makefile
+++ b/drivers/block/ublk/Makefile
@@ -5,6 +5,6 @@  ccflags-y			+= -I$(src)
 
 ublk_drv-$(CONFIG_BLK_DEV_UBLK)	:= main.o
 ifeq ($(CONFIG_UBLK_BPF), y)
-ublk_drv-$(CONFIG_BLK_DEV_UBLK)	+= bpf_ops.o bpf.o bpf_aio.o
+ublk_drv-$(CONFIG_BLK_DEV_UBLK)	+= bpf_ops.o bpf.o bpf_aio.o bpf_aio_ops.o
 endif
 obj-$(CONFIG_BLK_DEV_UBLK)	+= ublk_drv.o
diff --git a/drivers/block/ublk/bpf_aio.c b/drivers/block/ublk/bpf_aio.c
index 65013fe8054f..6e93f28f389b 100644
--- a/drivers/block/ublk/bpf_aio.c
+++ b/drivers/block/ublk/bpf_aio.c
@@ -243,9 +243,16 @@  __bpf_kfunc int bpf_aio_submit(struct bpf_aio *aio, int fd, loff_t pos,
 
 int __init bpf_aio_init(void)
 {
+	int err;
+
 	bpf_aio_cachep = KMEM_CACHE(bpf_aio, SLAB_PANIC);
 	bpf_aio_work_cachep = KMEM_CACHE(bpf_aio_work, SLAB_PANIC);
 	bpf_aio_wq = alloc_workqueue("bpf_aio", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 
+	err = bpf_aio_struct_ops_init();
+	if (err) {
+		pr_warn("error while initializing bpf aio struct_ops: %d\n", err);
+		return err;
+	}
 	return 0;
 }
diff --git a/drivers/block/ublk/bpf_aio.h b/drivers/block/ublk/bpf_aio.h
index 625737965c90..07fcd43fd2ac 100644
--- a/drivers/block/ublk/bpf_aio.h
+++ b/drivers/block/ublk/bpf_aio.h
@@ -3,6 +3,8 @@ 
 #ifndef UBLK_BPF_AIO_HEADER
 #define UBLK_BPF_AIO_HEADER
 
+#include "bpf_reg.h"
+
 #define	BPF_AIO_OP_BITS		8
 #define	BPF_AIO_OP_MASK		((1 << BPF_AIO_OP_BITS) - 1)
 
@@ -47,9 +49,18 @@  struct bpf_aio {
 
 typedef void (*bpf_aio_complete_t)(struct bpf_aio *io, long ret);
 
+/**
+ * struct bpf_aio_complete_ops - BPF struct_ops providing the callback for
+ * 	completing a `bpf_aio` submitted by `bpf_aio_submit()`
+ * @id: globally defined id used by the bpf aio consumer
+ * @bpf_aio_complete_cb: callback for completing a submitted `bpf_aio`
+ * @provider: holds all consumers of this struct_ops prog, used by the
+ * 	kernel only
+ */
 struct bpf_aio_complete_ops {
 	unsigned int		id;
 	bpf_aio_complete_t	bpf_aio_complete_cb;
+	struct bpf_prog_provider provider;
 };
 
 static inline unsigned int bpf_aio_get_op(const struct bpf_aio *aio)
@@ -58,6 +69,7 @@  static inline unsigned int bpf_aio_get_op(const struct bpf_aio *aio)
 }
 
 int bpf_aio_init(void);
+int bpf_aio_struct_ops_init(void);
 struct bpf_aio *bpf_aio_alloc(unsigned int op, enum bpf_aio_flag aio_flags);
 struct bpf_aio *bpf_aio_alloc_sleepable(unsigned int op, enum bpf_aio_flag aio_flags);
 void bpf_aio_release(struct bpf_aio *aio);
diff --git a/drivers/block/ublk/bpf_aio_ops.c b/drivers/block/ublk/bpf_aio_ops.c
new file mode 100644
index 000000000000..12757f634dbd
--- /dev/null
+++ b/drivers/block/ublk/bpf_aio_ops.c
@@ -0,0 +1,152 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Red Hat */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/xarray.h>
+
+#include "bpf_aio.h"
+
+static DEFINE_XARRAY(bpf_aio_all_ops);
+static DEFINE_MUTEX(bpf_aio_ops_lock);
+
+static bool bpf_aio_ops_is_valid_access(int off, int size,
+		enum bpf_access_type type, const struct bpf_prog *prog,
+		struct bpf_insn_access_aux *info)
+{
+	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_aio_ops_btf_struct_access(struct bpf_verifier_log *log,
+		const struct bpf_reg_state *reg,
+		int off, int size)
+{
+	/* bpf_aio prog can change nothing */
+	if (size > 0)
+		return -EACCES;
+
+	return NOT_INIT;
+}
+
+static const struct bpf_verifier_ops bpf_aio_verifier_ops = {
+	.get_func_proto = bpf_base_func_proto,
+	.is_valid_access = bpf_aio_ops_is_valid_access,
+	.btf_struct_access = bpf_aio_ops_btf_struct_access,
+};
+
+static int bpf_aio_ops_init(struct btf *btf)
+{
+	return 0;
+}
+
+static int bpf_aio_ops_check_member(const struct btf_type *t,
+		const struct btf_member *member,
+		const struct bpf_prog *prog)
+{
+	if (prog->sleepable)
+		return -EINVAL;
+	return 0;
+}
+
+static int bpf_aio_ops_init_member(const struct btf_type *t,
+		const struct btf_member *member,
+		void *kdata, const void *udata)
+{
+	const struct bpf_aio_complete_ops *uops;
+	struct bpf_aio_complete_ops *kops;
+	u32 moff;
+
+	uops = (const struct bpf_aio_complete_ops *)udata;
+	kops = (struct bpf_aio_complete_ops *)kdata;
+
+	moff = __btf_member_bit_offset(t, member) / 8;
+
+	switch (moff) {
+	case offsetof(struct bpf_aio_complete_ops, id):
+		/* For the id field, this function has to copy it and return 1
+		 * to indicate that the data has been handled by the struct_ops
+		 * type, otherwise the verifier will reject the map if the
+		 * value of this field is not zero.
+		 */
+		kops->id = uops->id;
+		return 1;
+	}
+	return 0;
+}
+
+static int bpf_aio_reg(void *kdata, struct bpf_link *link)
+{
+	struct bpf_aio_complete_ops *ops = kdata;
+	struct bpf_aio_complete_ops *curr;
+	int ret = -EBUSY;
+
+	mutex_lock(&bpf_aio_ops_lock);
+	if (!xa_load(&bpf_aio_all_ops, ops->id)) {
+		curr = kmalloc(sizeof(*curr), GFP_KERNEL);
+		if (curr) {
+			*curr = *ops;
+			bpf_prog_provider_init(&curr->provider);
+			ret = xa_err(xa_store(&bpf_aio_all_ops, ops->id,
+						curr, GFP_KERNEL));
+		} else {
+			ret = -ENOMEM;
+		}
+	}
+	mutex_unlock(&bpf_aio_ops_lock);
+
+	return ret;
+}
+
+static void bpf_aio_unreg(void *kdata, struct bpf_link *link)
+{
+	struct bpf_aio_complete_ops *ops = kdata;
+	struct bpf_prog_consumer *consumer, *tmp;
+	struct bpf_aio_complete_ops *curr;
+	LIST_HEAD(consumer_list);
+
+	mutex_lock(&bpf_aio_ops_lock);
+	curr = xa_erase(&bpf_aio_all_ops, ops->id);
+	if (curr)
+		list_splice_init(&curr->provider.list, &consumer_list);
+	mutex_unlock(&bpf_aio_ops_lock);
+
+	list_for_each_entry_safe(consumer, tmp, &consumer_list, node)
+		bpf_prog_consumer_detach(consumer, true);
+	kfree(curr);
+}
+
+static void bpf_aio_cb(struct bpf_aio *io, long ret)
+{
+}
+
+static struct bpf_aio_complete_ops __bpf_aio_ops = {
+	.bpf_aio_complete_cb	=	bpf_aio_cb,
+};
+
+static struct bpf_struct_ops bpf_aio_ops = {
+	.verifier_ops = &bpf_aio_verifier_ops,
+	.init = bpf_aio_ops_init,
+	.check_member = bpf_aio_ops_check_member,
+	.init_member = bpf_aio_ops_init_member,
+	.reg = bpf_aio_reg,
+	.unreg = bpf_aio_unreg,
+	.name = "bpf_aio_complete_ops",
+	.cfi_stubs = &__bpf_aio_ops,
+	.owner = THIS_MODULE,
+};
+
+int __init bpf_aio_struct_ops_init(void)
+{
+	int err;
+
+	err = register_bpf_struct_ops(&bpf_aio_ops, bpf_aio_complete_ops);
+	if (err)
+		pr_warn("error while registering bpf aio struct ops: %d\n", err);
+
+	return err;
+}
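
For reference, a minimal kernel-side sketch of how a registered ops could be
looked up by id and its callback invoked on completion; bpf_aio_get_ops() and
bpf_aio_do_complete() are hypothetical helpers, not part of this patch, and
the real consumer wiring (including lifetime handling against bpf_aio_unreg())
lives elsewhere in the series:

	/* hypothetical: look up a registered struct_ops by its global id */
	static struct bpf_aio_complete_ops *bpf_aio_get_ops(unsigned int id)
	{
		struct bpf_aio_complete_ops *ops;

		mutex_lock(&bpf_aio_ops_lock);
		ops = xa_load(&bpf_aio_all_ops, id);
		mutex_unlock(&bpf_aio_ops_lock);

		return ops;
	}

	/* hypothetical completion path: hand the finished aio to the prog */
	static void bpf_aio_do_complete(struct bpf_aio *aio, unsigned int id, long ret)
	{
		struct bpf_aio_complete_ops *ops = bpf_aio_get_ops(id);

		if (ops && ops->bpf_aio_complete_cb)
			ops->bpf_aio_complete_cb(aio, ret);
	}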