diff mbox series

[RFC,bpf-next,4/4] selftests/bpf: Add test cases for bpf file-system iterator

Message ID 20230507040107.3755166-5-houtao@huaweicloud.com (mailing list archive)
State RFC
Delegated to: BPF
Headers show
Series Introduce bpf iterators for file-system | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/apply fail Patch does not apply to bpf-next
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-5 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-9 success Logs for veristat

Commit Message

Hou Tao May 7, 2023, 4:01 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

Add three test cases to demonstrate the basic functionalities of bpf
file-system iterator:
1) dump_raw_inode. Use bpf_seq_printf_btf to dump the content of the
   passed inode and its super_block
2) dump_inode. Use bpf_filemap_{cachestat,find_present,get_order} to
   dump the details of the inode page cache.
3) dump_mnt. Dump the basic information of the passed mount.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 .../selftests/bpf/prog_tests/bpf_iter_fs.c    | 184 ++++++++++++++++++
 .../testing/selftests/bpf/progs/bpf_iter_fs.c | 122 ++++++++++++
 2 files changed, 306 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_fs.c
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
new file mode 100644
index 000000000000..e26d736001b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_fs.c
@@ -0,0 +1,184 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+#include "bpf_iter_fs.skel.h"
+
+/* Attach dump_raw_inode to the inode of a freshly created scratch file
+ * and read the iterator output (a BTF dump of the inode + super_block).
+ */
+static void test_bpf_iter_raw_inode(void)
+{
+	const char *fpath = "/tmp/raw_inode.test";
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int ino_fd, iter_fd, err;
+	struct bpf_iter_fs *skel;
+	struct bpf_link *link;
+	char buf[8192];
+	ssize_t nr;
+
+	ino_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+	if (!ASSERT_GE(ino_fd, 0, "open file"))
+		return;
+	/* Give the inode a non-zero size; don't ignore a setup failure */
+	err = ftruncate(ino_fd, 4095);
+	if (!ASSERT_OK(err, "ftruncate"))
+		goto close_ino;
+
+	skel = bpf_iter_fs__open();
+	if (!ASSERT_OK_PTR(skel, "open"))
+		goto close_ino;
+
+	bpf_program__set_autoload(skel->progs.dump_raw_inode, true);
+
+	err = bpf_iter_fs__load(skel);
+	if (!ASSERT_OK(err, "load"))
+		goto free_skel;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.fs.type = BPF_FS_ITER_INODE;
+	linfo.fs.fd = ino_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(skel->progs.dump_raw_inode, &opts);
+	if (!ASSERT_OK_PTR(link, "attach iter"))
+		goto free_skel;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create iter"))
+		goto free_link;
+
+	nr = read(iter_fd, buf, sizeof(buf));
+	if (!ASSERT_GT(nr, 0, "read iter"))
+		goto close_iter;
+
+	/* Overwrite the trailing newline to NUL-terminate for puts() */
+	buf[nr - 1] = 0;
+	puts(buf);
+
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+free_skel:
+	bpf_iter_fs__destroy(skel);
+close_ino:
+	close(ino_fd);
+	/* Don't leave the scratch file behind */
+	unlink(fpath);
+}
+
+/* Attach dump_inode to a scratch file with a sparse page-cache layout
+ * (two dirty ranges with a hole in between) and read the iterator output.
+ */
+static void test_bpf_iter_inode(void)
+{
+	const char *fpath = "/tmp/inode.test";
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int ino_fd, iter_fd, err;
+	struct bpf_iter_fs *skel;
+	struct bpf_link *link;
+	char buf[8192];
+	ssize_t nr;
+
+	/* Close fd after reading iterator completes */
+	ino_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+	if (!ASSERT_GE(ino_fd, 0, "open file"))
+		return;
+	/* Populate two discontiguous ranges of the page cache.  The data
+	 * content is irrelevant, but zero the buffer so uninitialized
+	 * stack memory is not written out, and check the writes landed.
+	 */
+	memset(buf, 0, sizeof(buf));
+	nr = pwrite(ino_fd, buf, sizeof(buf), 0);
+	if (!ASSERT_EQ(nr, sizeof(buf), "pwrite first range"))
+		goto close_ino;
+	nr = pwrite(ino_fd, buf, sizeof(buf), sizeof(buf) * 2);
+	if (!ASSERT_EQ(nr, sizeof(buf), "pwrite second range"))
+		goto close_ino;
+
+	skel = bpf_iter_fs__open();
+	if (!ASSERT_OK_PTR(skel, "open"))
+		goto close_ino;
+
+	bpf_program__set_autoload(skel->progs.dump_inode, true);
+
+	err = bpf_iter_fs__load(skel);
+	if (!ASSERT_OK(err, "load"))
+		goto free_skel;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.fs.type = BPF_FS_ITER_INODE;
+	linfo.fs.fd = ino_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(skel->progs.dump_inode, &opts);
+	if (!ASSERT_OK_PTR(link, "attach iter"))
+		goto free_skel;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create iter"))
+		goto free_link;
+
+	nr = read(iter_fd, buf, sizeof(buf));
+	if (!ASSERT_GT(nr, 0, "read iter"))
+		goto close_iter;
+
+	/* Overwrite the trailing newline to NUL-terminate for puts() */
+	buf[nr - 1] = 0;
+	puts(buf);
+
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+free_skel:
+	bpf_iter_fs__destroy(skel);
+close_ino:
+	close(ino_fd);
+	/* Don't leave the scratch file behind */
+	unlink(fpath);
+}
+
+/* Attach dump_mnt to the mount containing a scratch file (any fd on the
+ * mount pins it) and read the iterator output.
+ */
+static void test_bpf_iter_mnt(void)
+{
+	const char *fpath = "/tmp/mnt.test";
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int mnt_fd, iter_fd, err;
+	struct bpf_iter_fs *skel;
+	struct bpf_link *link;
+	char buf[8192];
+	ssize_t nr;
+
+	/* Close fd after reading iterator completes */
+	mnt_fd = open(fpath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+	if (!ASSERT_GE(mnt_fd, 0, "open file"))
+		return;
+
+	skel = bpf_iter_fs__open();
+	if (!ASSERT_OK_PTR(skel, "open"))
+		goto close_mnt;
+
+	bpf_program__set_autoload(skel->progs.dump_mnt, true);
+
+	err = bpf_iter_fs__load(skel);
+	if (!ASSERT_OK(err, "load"))
+		goto free_skel;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.fs.type = BPF_FS_ITER_MNT;
+	linfo.fs.fd = mnt_fd;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(skel->progs.dump_mnt, &opts);
+	if (!ASSERT_OK_PTR(link, "attach iter"))
+		goto free_skel;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create iter"))
+		goto free_link;
+
+	nr = read(iter_fd, buf, sizeof(buf));
+	if (!ASSERT_GT(nr, 0, "read iter"))
+		goto close_iter;
+
+	/* Overwrite the trailing newline to NUL-terminate for puts() */
+	buf[nr - 1] = 0;
+	puts(buf);
+
+close_iter:
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+free_skel:
+	bpf_iter_fs__destroy(skel);
+close_mnt:
+	/* label renamed from copy-pasted "close_ino": the fd is mnt_fd */
+	close(mnt_fd);
+	/* Don't leave the scratch file behind */
+	unlink(fpath);
+}
+
+/* Test entry point: run each bpf file-system iterator demo as a subtest */
+void test_bpf_iter_fs(void)
+{
+	if (test__start_subtest("dump_raw_inode"))
+		test_bpf_iter_raw_inode();
+	if (test__start_subtest("dump_inode"))
+		test_bpf_iter_inode();
+	if (test__start_subtest("dump_mnt"))
+		test_bpf_iter_mnt();
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_fs.c b/tools/testing/selftests/bpf/progs/bpf_iter_fs.c
new file mode 100644
index 000000000000..e238446b6ddf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_fs.c
@@ -0,0 +1,122 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include "bpf_iter.h"
+#include <string.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* State carried into the bpf_loop() callback that walks an inode's
+ * page cache (see dump_page_order()).
+ */
+struct dump_ctx {
+	struct seq_file *seq;	/* iterator's output seq_file */
+	struct inode *inode;	/* inode whose page cache is walked */
+	unsigned long from;	/* next page index to search from */
+	unsigned long max;	/* search bound passed as "last" to the kfuncs */
+};
+
+/* kfuncs expected to be provided by the kernel-side fs iterator
+ * (earlier patches in this series); resolved at load time via __ksym.
+ */
+void bpf_filemap_cachestat(struct inode *inode, unsigned long from, unsigned long last,
+		           struct cachestat *cs) __ksym;
+long bpf_filemap_find_present(struct inode *inode, unsigned long from, unsigned long last) __ksym;
+long bpf_filemap_get_order(struct inode *inode, unsigned long index) __ksym;
+
+/* bpf_loop() callback: print the index and folio order of the next
+ * present page-cache entry at or after dump->from.  Returns 1 to stop
+ * the loop once no entry is found in [from, max], 0 to continue.
+ * (Indentation normalized to tabs per kernel coding style.)
+ */
+static u64 dump_page_order(unsigned int i, void *ctx)
+{
+	struct dump_ctx *dump = ctx;
+	unsigned long index;
+	unsigned int order;
+
+	index = bpf_filemap_find_present(dump->inode, dump->from, dump->max);
+	if (index == -1UL)
+		return 1;
+	order = bpf_filemap_get_order(dump->inode, index);
+
+	BPF_SEQ_PRINTF(dump->seq, "  page offset %lu order %u\n", index, order);
+	/* skip past the whole folio: an order-N folio spans 1 << N pages */
+	dump->from = index + (1 << order);
+	return 0;
+}
+
+/* Dump the raw content of the target inode and its super_block using
+ * the kernel's BTF type information (bpf_seq_printf_btf).
+ */
+SEC("?iter/fs_inode")
+int dump_raw_inode(struct bpf_iter__fs_inode *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct inode *inode = ctx->inode;
+	struct btf_ptr ptr;
+
+	/* per bpf_iter convention, a NULL object signals end of iteration */
+	if (inode == NULL)
+		return 0;
+
+	/* BTF-dump the inode itself */
+	memset(&ptr, 0, sizeof(ptr));
+	ptr.type_id = bpf_core_type_id_kernel(struct inode);
+	ptr.ptr = inode;
+	bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
+
+	/* ... then its super_block */
+	memset(&ptr, 0, sizeof(ptr));
+	ptr.type_id = bpf_core_type_id_kernel(struct super_block);
+	ptr.ptr = inode->i_sb;
+	bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
+
+	return 0;
+}
+
+/* Dump details of the target inode: super_block info, basic inode
+ * attributes, page-cache residency stats, and the order of each present
+ * folio in the page cache.
+ */
+SEC("?iter/fs_inode")
+int dump_inode(struct bpf_iter__fs_inode *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct inode *inode = ctx->inode;
+	struct cachestat cs = {};
+	struct super_block *sb;
+	struct dentry *dentry;
+	struct dump_ctx dump;
+
+	/* per bpf_iter convention, a NULL object signals end of iteration */
+	if (inode == NULL)
+		return 0;
+
+	sb = inode->i_sb;
+	BPF_SEQ_PRINTF(seq, "sb: bsize %lu s_op %ps s_type %ps name %s\n",
+		       sb->s_blocksize, sb->s_op, sb->s_type, sb->s_type->name);
+
+	BPF_SEQ_PRINTF(seq, "ino: inode nlink %d inum %lu size %llu",
+			inode->i_nlink, inode->i_ino, inode->i_size);
+	/* the dentry may be absent in the iterator context */
+	dentry = ctx->dentry;
+	if (dentry)
+		BPF_SEQ_PRINTF(seq, ", name %s\n", dentry->d_name.name);
+	else
+		BPF_SEQ_PRINTF(seq, "\n");
+
+	/* residency stats over the whole file range */
+	bpf_filemap_cachestat(inode, 0, ~0UL, &cs);
+	BPF_SEQ_PRINTF(seq, "cache: cached %llu dirty %llu wb %llu evicted %llu\n",
+			cs.nr_cache, cs.nr_dirty, cs.nr_writeback, cs.nr_evicted);
+
+	dump.seq = seq;
+	dump.inode = inode;
+	dump.from = 0;
+	/* TODO: handle BPF_MAX_LOOPS */
+	/* NOTE(review): round size up to pages assuming 4KiB pages —
+	 * confirm this handles non-4K PAGE_SIZE architectures.
+	 */
+	dump.max = ((unsigned long)inode->i_size + 4095) / 4096;
+	BPF_SEQ_PRINTF(seq, "orders:\n");
+	bpf_loop(dump.max, dump_page_order, &dump, 0);
+
+	return 0;
+}
+
+/* Dump basic information about the target mount: device numbers, mount
+ * id, parent mount id, mount flags, and the peer-group id when shared.
+ */
+SEC("?iter/fs_mnt")
+int dump_mnt(struct bpf_iter__fs_mnt *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct mount *mnt = ctx->mnt;
+	struct super_block *sb;
+
+	/* per bpf_iter convention, a NULL object signals end of iteration */
+	if (mnt == NULL)
+		return 0;
+
+	/* split s_dev into major:minor; 20 is presumably MINORBITS —
+	 * NOTE(review): consider using the named constant instead.
+	 */
+	sb = mnt->mnt.mnt_sb;
+	BPF_SEQ_PRINTF(seq, "dev %u:%u ",
+		       sb->s_dev >> 20, sb->s_dev & ((1 << 20) - 1));
+
+	BPF_SEQ_PRINTF(seq, "id %d parent_id %d mnt_flags 0x%x",
+		       mnt->mnt_id, mnt->mnt_parent->mnt_id, mnt->mnt.mnt_flags);
+	/* 0x1000 appears to be MNT_SHARED (open-coded) — TODO confirm and
+	 * use the macro if available to BPF programs.
+	 */
+	if (mnt->mnt.mnt_flags & 0x1000)
+		BPF_SEQ_PRINTF(seq, " shared:%d", mnt->mnt_group_id);
+	BPF_SEQ_PRINTF(seq, "\n");
+
+	return 0;
+}