@@ -19,6 +19,79 @@ static int ublk_set_bpf_ops(struct ublk_device *ub,
return 0;
}
+static int ublk_set_bpf_aio_op(struct ublk_device *ub,
+ struct bpf_aio_complete_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ if (ops && ublk_get_queue(ub, i)->bpf_aio_ops) {
+ ublk_set_bpf_aio_op(ub, NULL);
+ return -EBUSY;
+ }
+ ublk_get_queue(ub, i)->bpf_aio_ops = ops;
+ }
+ return 0;
+}
+
+static int ublk_bpf_aio_prog_attach_cb(struct bpf_prog_consumer *consumer,
+ struct bpf_prog_provider *provider)
+{
+ struct ublk_device *ub = container_of(consumer, struct ublk_device,
+ aio_prog);
+ struct bpf_aio_complete_ops *ops = container_of(provider,
+ struct bpf_aio_complete_ops, provider);
+ int ret = -ENODEV;
+
+ if (ublk_get_device(ub)) {
+ ret = ublk_set_bpf_aio_op(ub, ops);
+ if (ret)
+ ublk_put_device(ub);
+ }
+
+ return ret;
+}
+
+static void ublk_bpf_aio_prog_detach_cb(struct bpf_prog_consumer *consumer,
+ bool unreg)
+{
+ struct ublk_device *ub = container_of(consumer, struct ublk_device,
+ aio_prog);
+
+ if (unreg) {
+ blk_mq_freeze_queue(ub->ub_disk->queue);
+ ublk_set_bpf_aio_op(ub, NULL);
+ blk_mq_unfreeze_queue(ub->ub_disk->queue);
+ } else {
+ ublk_set_bpf_aio_op(ub, NULL);
+ }
+ ublk_put_device(ub);
+}
+
+static const struct bpf_prog_consumer_ops ublk_aio_prog_consumer_ops = {
+ .attach_fn = ublk_bpf_aio_prog_attach_cb,
+ .detach_fn = ublk_bpf_aio_prog_detach_cb,
+};
+
+static int ublk_bpf_aio_attach(struct ublk_device *ub)
+{
+ if (!ublk_dev_support_bpf_aio(ub))
+ return 0;
+
+ ub->aio_prog.prog_id = ub->params.bpf.aio_ops_id;
+ ub->aio_prog.ops = &ublk_aio_prog_consumer_ops;
+
+ return bpf_aio_prog_attach(&ub->aio_prog);
+}
+
+static void ublk_bpf_aio_detach(struct ublk_device *ub)
+{
+ if (!ublk_dev_support_bpf_aio(ub))
+ return;
+ bpf_aio_prog_detach(&ub->aio_prog);
+}
+
+
static int ublk_bpf_prog_attach_cb(struct bpf_prog_consumer *consumer,
struct bpf_prog_provider *provider)
{
@@ -76,19 +149,25 @@ static const struct bpf_prog_consumer_ops ublk_prog_consumer_ops = {
int ublk_bpf_attach(struct ublk_device *ub)
{
+ int ret;
+
if (!ublk_dev_support_bpf(ub))
return 0;
ub->prog.prog_id = ub->params.bpf.ops_id;
ub->prog.ops = &ublk_prog_consumer_ops;
- return ublk_bpf_prog_attach(&ub->prog);
+ ret = ublk_bpf_prog_attach(&ub->prog);
+ if (ret)
+ return ret;
+ return ublk_bpf_aio_attach(ub);
}
void ublk_bpf_detach(struct ublk_device *ub)
{
if (!ublk_dev_support_bpf(ub))
return;
+ ublk_bpf_aio_detach(ub);
ublk_bpf_prog_detach(&ub->prog);
}
@@ -213,6 +213,10 @@ __bpf_kfunc int bpf_aio_submit(struct bpf_aio *aio, int fd, loff_t pos,
{
struct file *file;
+ /*
+ * ->ops has to be assigned by a kfunc of the consumer subsystem
+ * because the bpf prog's lifetime is aligned with that subsystem
+ */
if (!aio->ops)
return -EINVAL;
@@ -75,4 +75,8 @@ struct bpf_aio *bpf_aio_alloc_sleepable(unsigned int op, enum bpf_aio_flag aio_f
void bpf_aio_release(struct bpf_aio *aio);
int bpf_aio_submit(struct bpf_aio *aio, int fd, loff_t pos, unsigned bytes,
unsigned io_flags);
+
+int bpf_aio_prog_attach(struct bpf_prog_consumer *consumer);
+void bpf_aio_prog_detach(struct bpf_prog_consumer *consumer);
+
#endif
@@ -120,6 +120,28 @@ static void bpf_aio_unreg(void *kdata, struct bpf_link *link)
kfree(curr);
}
+int bpf_aio_prog_attach(struct bpf_prog_consumer *consumer)
+{
+ unsigned id = consumer->prog_id;
+ struct bpf_aio_complete_ops *ops;
+ int ret = -EINVAL;
+
+ mutex_lock(&bpf_aio_ops_lock);
+ ops = xa_load(&bpf_aio_all_ops, id);
+ if (ops && ops->id == id)
+ ret = bpf_prog_consumer_attach(consumer, &ops->provider);
+ mutex_unlock(&bpf_aio_ops_lock);
+
+ return ret;
+}
+
+void bpf_aio_prog_detach(struct bpf_prog_consumer *consumer)
+{
+ mutex_lock(&bpf_aio_ops_lock);
+ bpf_prog_consumer_detach(consumer, false);
+ mutex_unlock(&bpf_aio_ops_lock);
+}
+
static void bpf_aio_cb(struct bpf_aio *io, long ret)
{
}
@@ -126,6 +126,7 @@ struct ublk_queue {
#ifdef CONFIG_UBLK_BPF
struct ublk_bpf_ops *bpf_ops;
+ struct bpf_aio_complete_ops *bpf_aio_ops;
#endif
unsigned short force_abort:1;
@@ -159,6 +160,7 @@ struct ublk_device {
#ifdef CONFIG_UBLK_BPF
struct bpf_prog_consumer prog;
+ struct bpf_prog_consumer aio_prog;
#endif
struct mutex mutex;
@@ -203,6 +205,14 @@ static inline bool ublk_dev_support_bpf(const struct ublk_device *ub)
return ub->dev_info.flags & UBLK_F_BPF;
}
+static inline bool ublk_dev_support_bpf_aio(const struct ublk_device *ub)
+{
+ if (!ublk_dev_support_bpf(ub))
+ return false;
+
+ return ub->params.bpf.flags & UBLK_BPF_HAS_AIO_OPS_ID;
+}
+
struct ublk_device *ublk_get_device(struct ublk_device *ub);
struct ublk_device *ublk_get_device_from_id(int idx);
void ublk_put_device(struct ublk_device *ub);
@@ -406,9 +406,11 @@ struct ublk_param_zoned {
struct ublk_param_bpf {
#define UBLK_BPF_HAS_OPS_ID (1 << 0)
+#define UBLK_BPF_HAS_AIO_OPS_ID (1 << 1)
__u8 flags;
__u8 ops_id;
- __u8 reserved[6];
+ __u16 aio_ops_id;
+ __u8 reserved[4];
};
struct ublk_params {

Attach the bpf aio program to the ublk device before adding the ublk disk,
and detach it after the disk is removed. When a bpf aio prog is
unregistered, all attached devices are detached from it automatically.

The ublk device provides the bpf aio struct_ops ID for attaching the
specific prog, and each ublk device can be attached to only a single bpf
aio prog, so the attached prog can be used to submit bpf aio for handling
ublk IO.

Since the bpf aio prog is attached to the ublk device, the ublk bpf prog
has to provide one kfunc to assign the 'struct bpf_aio_complete_ops *' to
the 'struct bpf_aio' instance.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 drivers/block/ublk/bpf.c         | 81 +++++++++++++++++++++++++++++++-
 drivers/block/ublk/bpf_aio.c     |  4 ++
 drivers/block/ublk/bpf_aio.h     |  4 ++
 drivers/block/ublk/bpf_aio_ops.c | 22 +++++++++
 drivers/block/ublk/ublk.h        | 10 ++++
 include/uapi/linux/ublk_cmd.h    |  4 +-
 6 files changed, 123 insertions(+), 2 deletions(-)
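
Note (not part of the patch): a minimal userspace sketch of how a ublk
server might fill the new bpf parameters so that ublk_bpf_attach() can
attach both the ublk bpf prog and the bpf aio prog when the disk is added.
The ops IDs, the UBLK_PARAM_TYPE_BPF bit, and the '.bpf' member layout in
'struct ublk_params' are assumptions carried over from the earlier patches
in this series; only UBLK_BPF_HAS_AIO_OPS_ID and aio_ops_id are introduced
here.

	#include <string.h>
	#include <linux/ublk_cmd.h>

	static void ublk_fill_bpf_params(struct ublk_params *p)
	{
		/* assumed param-type bit from the earlier ublk-bpf patches */
		p->types |= UBLK_PARAM_TYPE_BPF;

		memset(&p->bpf, 0, sizeof(p->bpf));
		p->bpf.flags = UBLK_BPF_HAS_OPS_ID | UBLK_BPF_HAS_AIO_OPS_ID;
		/* assumed IDs of the registered struct_ops maps */
		p->bpf.ops_id = 1;	/* struct ublk_bpf_ops */
		p->bpf.aio_ops_id = 2;	/* struct bpf_aio_complete_ops */
	}

With the device configured this way, bpf_aio_prog_attach() resolves
aio_ops_id to the registered 'struct bpf_aio_complete_ops' and
ublk_set_bpf_aio_op() stores it in every hardware queue; the ublk bpf
prog's kfunc can then assign that pointer to each 'struct bpf_aio' before
bpf_aio_submit(), which rejects an aio whose ->ops is still unset.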