
[bpf,v2,7/9] selftests/bpf: Add tests for reading a dangling map iter fd

Message ID 20220810080538.1845898-8-houtao@huaweicloud.com (mailing list archive)
State Accepted
Commit 5836d81e4b039429f409007ba7e0235391537397
Delegated to: BPF
Series fixes for bpf map iterator

Checks

Context Check Description
bpf/vmtest-bpf-PR fail PR summary
netdev/tree_selection success Clearly marked for bpf
netdev/fixes_present success Fixes tag present in non-next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: song@kernel.org martin.lau@linux.dev linux-kselftest@vger.kernel.org 9erthalion6@gmail.com mykolal@fb.com shuah@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-VM_Test-1 success Logs for Kernel LATEST on ubuntu-latest with gcc
bpf/vmtest-bpf-VM_Test-2 success Logs for Kernel LATEST on ubuntu-latest with llvm-16
bpf/vmtest-bpf-VM_Test-3 fail Logs for Kernel LATEST on z15 with gcc

Commit Message

Hou Tao Aug. 10, 2022, 8:05 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

After closing both the related link fd and map fd, read the map
iterator fd to ensure it is OK to do so.
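
The sequence being exercised is, in condensed form, the following (a
sketch of the do_read_map_iter_fd() helper added below, with error
handling omitted):

  link = bpf_program__attach_iter(prog, &opts);  /* opts carries the map fd */
  iter_fd = bpf_iter_create(bpf_link__fd(link));

  /* Drop the last user references to the link and the map */
  bpf_link__destroy(link);
  bpf_object__destroy_skeleton(*skel);

  /* Wait for the deferred map freeing (two synchronize_rcu() grace periods) */
  kern_sync_rcu();
  kern_sync_rcu();

  /* Reading the iterator fd must still not fail */
  while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
      ;
  close(iter_fd);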

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 .../selftests/bpf/prog_tests/bpf_iter.c       | 92 +++++++++++++++++++
 1 file changed, 92 insertions(+)

Comments

Yonghong Song Aug. 10, 2022, 3:13 p.m. UTC | #1
On 8/10/22 1:05 AM, Hou Tao wrote:
> From: Hou Tao <houtao1@huawei.com>
> 
> After closing both the related link fd and map fd, read the map
> iterator fd to ensure it is OK to do so.
> 
> Signed-off-by: Hou Tao <houtao1@huawei.com>

Acked-by: Yonghong Song <yhs@fb.com>

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index a33874b081b6..b690c9e9d346 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -28,6 +28,7 @@ 
 #include "bpf_iter_test_kern6.skel.h"
 #include "bpf_iter_bpf_link.skel.h"
 #include "bpf_iter_ksym.skel.h"
+#include "bpf_iter_sockmap.skel.h"
 
 static int duration;
 
@@ -67,6 +68,50 @@  static void do_dummy_read(struct bpf_program *prog)
 	bpf_link__destroy(link);
 }
 
+static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
+				struct bpf_map *map)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	struct bpf_link *link;
+	char buf[16] = {};
+	int iter_fd, len;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = bpf_map__fd(map);
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(prog, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
+		bpf_link__destroy(link);
+		return;
+	}
+
+	/* Close link and map fd prematurely */
+	bpf_link__destroy(link);
+	bpf_object__destroy_skeleton(*skel);
+	*skel = NULL;
+
+	/* Try to let the map free work run first if the map is freed */
+	usleep(100);
+	/* Memory used by both the sock map and the sock local storage map is
+	 * freed after two synchronize_rcu() calls, so wait for that to happen
+	 */
+	kern_sync_rcu();
+	kern_sync_rcu();
+
+	/* Read after both map fd and link fd are closed */
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	ASSERT_GE(len, 0, "read_iterator");
+
+	close(iter_fd);
+}
+
 static int read_fd_into_buffer(int fd, char *buf, int size)
 {
 	int bufleft = size;
@@ -827,6 +872,20 @@  static void test_bpf_array_map(void)
 	bpf_iter_bpf_array_map__destroy(skel);
 }
 
+static void test_bpf_array_map_iter_fd(void)
+{
+	struct bpf_iter_bpf_array_map *skel;
+
+	skel = bpf_iter_bpf_array_map__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
+			    skel->maps.arraymap1);
+
+	bpf_iter_bpf_array_map__destroy(skel);
+}
+
 static void test_bpf_percpu_array_map(void)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1009,6 +1068,20 @@  static void test_bpf_sk_storage_get(void)
 	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
 }
 
+static void test_bpf_sk_storage_map_iter_fd(void)
+{
+	struct bpf_iter_bpf_sk_storage_map *skel;
+
+	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_sk_storage_map,
+			    skel->maps.sk_stg_map);
+
+	bpf_iter_bpf_sk_storage_map__destroy(skel);
+}
+
 static void test_bpf_sk_storage_map(void)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1217,6 +1290,19 @@  static void test_task_vma(void)
 	bpf_iter_task_vma__destroy(skel);
 }
 
+void test_bpf_sockmap_map_iter_fd(void)
+{
+	struct bpf_iter_sockmap *skel;
+
+	skel = bpf_iter_sockmap__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
+
+	bpf_iter_sockmap__destroy(skel);
+}
+
 void test_bpf_iter(void)
 {
 	if (test__start_subtest("btf_id_or_null"))
@@ -1267,10 +1353,14 @@  void test_bpf_iter(void)
 		test_bpf_percpu_hash_map();
 	if (test__start_subtest("bpf_array_map"))
 		test_bpf_array_map();
+	if (test__start_subtest("bpf_array_map_iter_fd"))
+		test_bpf_array_map_iter_fd();
 	if (test__start_subtest("bpf_percpu_array_map"))
 		test_bpf_percpu_array_map();
 	if (test__start_subtest("bpf_sk_storage_map"))
 		test_bpf_sk_storage_map();
+	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
+		test_bpf_sk_storage_map_iter_fd();
 	if (test__start_subtest("bpf_sk_storage_delete"))
 		test_bpf_sk_storage_delete();
 	if (test__start_subtest("bpf_sk_storage_get"))
@@ -1283,4 +1373,6 @@  void test_bpf_iter(void)
 		test_link_iter();
 	if (test__start_subtest("ksym"))
 		test_ksym_iter();
+	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
+		test_bpf_sockmap_map_iter_fd();
 }
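
The three new subtests can be selected individually with the test_progs
subtest filter, e.g. ./test_progs -t bpf_iter/bpf_array_map_iter_fd
(and likewise bpf_iter/bpf_sk_storage_map_iter_fd and
bpf_iter/bpf_sockmap_map_iter_fd); the exact invocation is assumed from
the usual selftests workflow rather than stated in the patch itself.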