
[RFC,14/22] selftests: ublk: add tests for covering redirecting to userspace

Message ID 20250107120417.1237392-15-tom.leiming@gmail.com (mailing list archive)
State RFC
Series ublk: support bpf

Checks

Context Check Description
netdev/tree_selection success Not a local patch, async

Commit Message

Ming Lei Jan. 7, 2025, 12:04 p.m. UTC
Reuse ublk-null for testing UBLK_BPF_IO_REDIRECT:

- queue & complete ios with odd tag numbers directly in the bpf prog
- redirect ios with even tag numbers, and let userspace handle their
queueing & completion
- also select some of the redirected ios, return -EAGAIN from userspace
and mark them as ready for the bpf prog to handle, so they are finally
completed by the bpf prog on the second pass

This covers the UBLK_BPF_IO_REDIRECT code path; the per-tag routing is
sketched below.
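
A minimal sketch of that routing, assuming a hypothetical dispatch()
helper and a build_key() that mirrors the build_io_key() helpers added
by this patch (illustrative only, not part of the patch):

/* Illustrative sketch only -- not part of the patch. */
enum io_path {
	IO_COMPLETED_IN_BPF,	/* odd tags, or even tags on the 2nd pass */
	IO_REDIRECTED,		/* even tags on the 1st pass */
};

/* same key packing as the build_io_key() helpers below */
static unsigned long long build_key(unsigned short dev_id,
				    unsigned short q_id, unsigned short tag)
{
	return ((unsigned long long)dev_id << 32) |
	       ((unsigned long long)q_id << 16) | tag;
}

/* ready: userspace stored 1 under this io's key before completing with -EAGAIN */
static enum io_path dispatch(unsigned int tag, int ready)
{
	if (tag & 0x1)		/* odd tags never leave the bpf prog */
		return IO_COMPLETED_IN_BPF;
	if (ready)		/* 2nd pass, after userspace marked it ready */
		return IO_COMPLETED_IN_BPF;
	return IO_REDIRECTED;	/* 1st pass for even tags */
}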

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 tools/testing/selftests/ublk/Makefile         |  1 +
 .../selftests/ublk/progs/ublk_bpf_kfunc.h     | 10 +++
 .../testing/selftests/ublk/progs/ublk_null.c  | 68 +++++++++++++++++++
 tools/testing/selftests/ublk/test_null_04.sh  | 21 ++++++
 tools/testing/selftests/ublk/ublk_bpf.c       | 39 ++++++++++-
 5 files changed, 136 insertions(+), 3 deletions(-)
 create mode 100755 tools/testing/selftests/ublk/test_null_04.sh

Patch

diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index 5a940bae9cbb..38903f05d99d 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -22,6 +22,7 @@  endif
 TEST_PROGS := test_null_01.sh
 TEST_PROGS += test_null_02.sh
 TEST_PROGS += test_null_03.sh
+TEST_PROGS += test_null_04.sh
 
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS_EXTENDED = ublk_bpf
diff --git a/tools/testing/selftests/ublk/progs/ublk_bpf_kfunc.h b/tools/testing/selftests/ublk/progs/ublk_bpf_kfunc.h
index acab490d933c..1db8870b57d6 100644
--- a/tools/testing/selftests/ublk/progs/ublk_bpf_kfunc.h
+++ b/tools/testing/selftests/ublk/progs/ublk_bpf_kfunc.h
@@ -20,4 +20,14 @@  extern void ublk_bpf_complete_io(const struct ublk_bpf_io *io, int res) __ksym;
 extern int ublk_bpf_get_dev_id(const struct ublk_bpf_io *io) __ksym;
 extern int ublk_bpf_get_queue_id(const struct ublk_bpf_io *io) __ksym;
 extern int ublk_bpf_get_io_tag(const struct ublk_bpf_io *io) __ksym;
+
+static inline unsigned long long build_io_key(const struct ublk_bpf_io *io)
+{
+	unsigned long long dev_id = (unsigned short)ublk_bpf_get_dev_id(io);
+	unsigned long long q_id = (unsigned short)ublk_bpf_get_queue_id(io);
+	unsigned long long tag = ublk_bpf_get_io_tag(io);
+
+	return (dev_id << 32) | (q_id << 16) | tag;
+}
+
 #endif
diff --git a/tools/testing/selftests/ublk/progs/ublk_null.c b/tools/testing/selftests/ublk/progs/ublk_null.c
index 523bf8ff3ef8..cebdc8a2a214 100644
--- a/tools/testing/selftests/ublk/progs/ublk_null.c
+++ b/tools/testing/selftests/ublk/progs/ublk_null.c
@@ -9,6 +9,14 @@ 
 //#define DEBUG
 #include "ublk_bpf.h"
 
+/* todo: make this a writable payload of ublk_bpf_io */
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10240);
+	__type(key, unsigned long long);	/* dev_id + q_id + tag */
+	__type(value, int);
+} io_map SEC(".maps");
+
 /* libbpf v1.4.5 is required for struct_ops to work */
 
 static inline ublk_bpf_return_t __ublk_null_handle_io_split(const struct ublk_bpf_io *io, unsigned int _off)
@@ -44,6 +52,54 @@  static inline ublk_bpf_return_t __ublk_null_handle_io_split(const struct ublk_bp
 	return ublk_bpf_return_val(UBLK_BPF_IO_QUEUED, 0);
 }
 
+static inline ublk_bpf_return_t __ublk_null_handle_io_redirect(const struct ublk_bpf_io *io, unsigned int _off)
+{
+	unsigned int tag = ublk_bpf_get_io_tag(io);
+	unsigned long off = -1, sects = -1;
+	const struct ublksrv_io_desc *iod;
+	int res;
+
+	iod = ublk_bpf_get_iod(io);
+	if (iod) {
+		res = iod->nr_sectors << 9;
+		off = iod->start_sector;
+		sects = iod->nr_sectors;
+	} else
+		res = -EINVAL;
+
+	BPF_DBG("ublk dev %u qid %u: handle io tag %u %lx-%d res %d",
+			ublk_bpf_get_dev_id(io),
+			ublk_bpf_get_queue_id(io),
+			ublk_bpf_get_io_tag(io),
+			off, sects, res);
+	if (res < 0) {
+		ublk_bpf_complete_io(io, res);
+		return ublk_bpf_return_val(UBLK_BPF_IO_QUEUED, 0);
+	}
+
+	if (tag & 0x1) {
+		/* odd tags: queue & complete the io directly in the bpf prog */
+		ublk_bpf_complete_io(io, res);
+		return ublk_bpf_return_val(UBLK_BPF_IO_QUEUED, 0);
+	} else {
+		unsigned long long key = build_io_key(io);
+		int *pv;
+
+		/* stored value means if it is ready to complete IO */
+		pv = bpf_map_lookup_elem(&io_map, &key);
+		if (pv && *pv) {
+			ublk_bpf_complete_io(io, res);
+			return ublk_bpf_return_val(UBLK_BPF_IO_QUEUED, 0);
+		} else {
+			int v = 0;
+			res = bpf_map_update_elem(&io_map, &key, &v, BPF_ANY);
+			if (res)
+				bpf_printk("update io map element failed %d key %llx\n", res, key);
+			return ublk_bpf_return_val(UBLK_BPF_IO_REDIRECT, 0);
+		}
+	}
+}
+
 
 static inline ublk_bpf_return_t __ublk_null_handle_io(const struct ublk_bpf_io *io, unsigned int _off)
 {
@@ -106,4 +162,16 @@  struct ublk_bpf_ops null_ublk_bpf_ops_split = {
 	.queue_io_cmd = (void *)ublk_null_handle_io_split,
 };
 
+SEC("struct_ops/ublk_bpf_queue_io_cmd")
+ublk_bpf_return_t BPF_PROG(ublk_null_handle_io_redirect, struct ublk_bpf_io *io, unsigned int off)
+{
+	return __ublk_null_handle_io_redirect(io, off);
+}
+
+SEC(".struct_ops.link")
+struct ublk_bpf_ops null_ublk_bpf_ops_redirect = {
+	.id = 2,
+	.queue_io_cmd = (void *)ublk_null_handle_io_redirect,
+};
+
 char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/ublk/test_null_04.sh b/tools/testing/selftests/ublk/test_null_04.sh
new file mode 100755
index 000000000000..f175e2ddb5cd
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_null_04.sh
@@ -0,0 +1,21 @@ 
+#!/bin/bash
+
+. test_common.sh
+
+TID="null_04"
+ERR_CODE=0
+
+# prepare and register & pin bpf prog
+_prep_bpf_test "null" ublk_null.bpf.o
+
+# add one ublk null disk with the pinned bpf prog
+_add_ublk_dev -t null -n 0 --bpf_prog 2 --quiet
+
+# run fio over the ublk disk
+fio --name=job1 --filename=/dev/ublkb0 --ioengine=libaio --rw=readwrite --iodepth=32 --size=256M > /dev/null 2>&1
+ERR_CODE=$?
+
+# clean and unregister & unpin the bpf prog
+_cleanup_bpf_test "null"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/ublk_bpf.c b/tools/testing/selftests/ublk/ublk_bpf.c
index 2d923e42845d..e2c2e92268e1 100644
--- a/tools/testing/selftests/ublk/ublk_bpf.c
+++ b/tools/testing/selftests/ublk/ublk_bpf.c
@@ -1283,6 +1283,16 @@  static int cmd_dev_help(char *exe)
 }
 
 /****************** part 2: target implementation ********************/
+//extern int bpf_map_update_elem(int fd, const void *key, const void *value,
+//                                   __u64 flags);
+
+static inline unsigned long long build_io_key(struct ublk_queue *q, int tag)
+{
+	unsigned long long dev_id = (unsigned short)q->dev->dev_info.dev_id;
+	unsigned long long q_id = (unsigned short)q->q_id;
+
+	return (dev_id << 32) | (q_id << 16) | tag;
+}
 
 static int ublk_null_tgt_init(struct ublk_dev *dev)
 {
@@ -1314,12 +1324,35 @@  static int ublk_null_tgt_init(struct ublk_dev *dev)
 static int ublk_null_queue_io(struct ublk_queue *q, int tag)
 {
 	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
+	bool bpf = q->dev->dev_info.flags & UBLK_F_BPF;
 
-	/* won't be called for UBLK_F_BPF */
-	assert(!(q->dev->dev_info.flags & UBLK_F_BPF));
+	/* either !UBLK_F_BPF or UBLK_F_BPF with redirect */
+	assert(!bpf || (bpf && !(tag & 0x1)));
 
-	ublk_complete_io(q, tag, iod->nr_sectors << 9);
+	if (bpf && (tag % 4)) {
+		unsigned long long key = build_io_key(q, tag);
+		int map_fd;
+		int err;
+		int val = 1;
+
+		map_fd = bpf_obj_get("/sys/fs/bpf/ublk/null/io_map");
+		if (map_fd < 0) {
+			ublk_err("Error finding BPF map fd from pinned path\n");
+			goto exit;
+		}
+
+		/* make this io ready for bpf prog to handle */
+		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+		if (err) {
+			ublk_err("Error updating map element: %d\n", errno);
+			goto exit;
+		}
+		ublk_complete_io(q, tag, -EAGAIN);
+		return 0;
+	}
 
+exit:
+	ublk_complete_io(q, tag, iod->nr_sectors << 9);
 	return 0;
 }