[v4,bpf-next,09/14] selftests/bpf: Add C tests for rdonly PTR_TO_BTF_ID

Message ID: 033365d2afbbd18f6524203f1e729c477865ce2b.1653600578.git.lorenzo@kernel.org (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: net: netfilter: add kfunc helper to update ct timeout

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: songliubraving@fb.com linux-kselftest@vger.kernel.org john.fastabend@gmail.com kafai@fb.com shuah@kernel.org kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: No space is necessary after a cast CHECK: spaces preferred around that '*' (ctx:WxV) WARNING: externs should be avoided in .c files WARNING: line length of 103 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Kernel LATEST on ubuntu-latest with llvm-15
bpf/vmtest-bpf-next-VM_Test-1 success Logs for Kernel LATEST on ubuntu-latest with gcc
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Kernel LATEST on z15 with gcc

Commit Message

Lorenzo Bianconi May 26, 2022, 9:34 p.m. UTC
From: Kumar Kartikeya Dwivedi <memxor@gmail.com>

Both the return value of bpf_kptr_xchg and loads of read-only kptrs must be
marked with MEM_RDONLY. Add C selftests covering the accepted patterns as well
as the programs the verifier must now reject.
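
For reference, a minimal sketch of the rule as it applies to the selftest
types (reusing struct map_value, array_map and the kfunc declarations from
progs/map_kptr.c in this patch; the program name below is only illustrative,
not part of the series):

  SEC("?tc")
  int rdonly_kptr_sketch(struct __sk_buff *ctx)
  {
  	const struct prog_test_ref_kfunc *pc;
  	struct map_value *v;
  	int key = 0;

  	v = bpf_map_lookup_elem(&array_map, &key);
  	if (!v)
  		return 0;

  	/* load of a read-only kptr is marked rdonly_untrusted_ptr_or_null_ */
  	pc = v->const_unref_ptr;
  	/* storing it back into a const __kptr field is accepted */
  	v->const_unref_ptr = pc;

  	/* the return value of bpf_kptr_xchg on a const field is rdonly too */
  	pc = bpf_kptr_xchg(&v->const_ref_ptr, NULL);
  	if (!pc)
  		return 0;
  	/* v->unref_ptr = (void *)pc; would be rejected here: an rdonly
  	 * pointer may not be stored into a non-const __kptr field (see
  	 * kptr_const_to_non_const in map_kptr_fail.c below)
  	 */
  	v->const_unref_ptr = pc;
  	bpf_kfunc_call_test_release(pc);
  	return 0;
  }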

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 .../selftests/bpf/prog_tests/map_kptr.c       |   9 +-
 tools/testing/selftests/bpf/progs/map_kptr.c  |  31 ++++-
 .../selftests/bpf/progs/map_kptr_fail.c       | 114 ++++++++++++++++++
 3 files changed, 152 insertions(+), 2 deletions(-)

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
index fdcea7a61491..cca0bb51b752 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -36,6 +36,13 @@  struct {
 	{ "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
 	{ "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
 	{ "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
+	{ "kptr_const_to_non_const", "invalid kptr access, R0 type=rdonly_ptr_" },
+	{ "kptr_const_to_non_const_xchg", "invalid kptr access, R2 type=rdonly_ptr_" },
+	{ "kptr_const_or_null_to_non_const_xchg", "invalid kptr access, R2 type=rdonly_ptr_or_null_" },
+	{ "mark_rdonly", "R1 type=rdonly_untrusted_ptr_or_null_ expected=percpu_ptr_" },
+	{ "mark_ref_rdonly", "R1 type=rdonly_untrusted_ptr_or_null_ expected=percpu_ptr_" },
+	{ "mark_xchg_rdonly", "R1 type=rdonly_ptr_or_null_ expected=percpu_ptr_" },
+	{ "kptr_get_no_const", "arg#0 cannot raise reference for pointer to const" },
 };
 
 static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
@@ -91,7 +98,7 @@  static void test_map_kptr_success(bool test_run)
 	);
 	struct map_kptr *skel;
 	int key = 0, ret;
-	char buf[16];
+	char buf[32];
 
 	skel = map_kptr__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
index eb8217803493..f77689544e65 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -6,6 +6,8 @@ 
 struct map_value {
 	struct prog_test_ref_kfunc __kptr *unref_ptr;
 	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+	const struct prog_test_ref_kfunc __kptr *const_unref_ptr;
+	const struct prog_test_ref_kfunc __kptr_ref *const_ref_ptr;
 };
 
 struct array_map {
@@ -58,12 +60,14 @@  DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo
 DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern const struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire_const(void) __ksym;
 extern struct prog_test_ref_kfunc *
 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
-extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+extern void bpf_kfunc_call_test_release(const struct prog_test_ref_kfunc *p) __ksym;
 
 static void test_kptr_unref(struct map_value *v)
 {
+	const struct prog_test_ref_kfunc *pc;
 	struct prog_test_ref_kfunc *p;
 
 	p = v->unref_ptr;
@@ -77,10 +81,21 @@  static void test_kptr_unref(struct map_value *v)
 	v->unref_ptr = p;
 	/* store NULL */
 	v->unref_ptr = NULL;
+
+	pc = v->const_ref_ptr;
+	/* store rdonly_untrusted_ptr_or_null_ */
+	v->const_unref_ptr = pc;
+	if (!pc)
+		return;
+	/* store rdonly_untrusted_ptr_ */
+	v->const_unref_ptr = pc;
+	/* store NULL */
+	v->const_unref_ptr = NULL;
 }
 
 static void test_kptr_ref(struct map_value *v)
 {
+	const struct prog_test_ref_kfunc *pc;
 	struct prog_test_ref_kfunc *p;
 
 	p = v->ref_ptr;
@@ -114,6 +129,20 @@  static void test_kptr_ref(struct map_value *v)
 		return;
 	}
 	bpf_kfunc_call_test_release(p);
+
+	pc = bpf_kptr_xchg(&v->const_ref_ptr, NULL);
+	if (!pc)
+		return;
+	/* store rdonly_ptr_ */
+	v->const_unref_ptr = pc;
+	bpf_kfunc_call_test_release(pc);
+
+	pc = bpf_kfunc_call_test_acquire_const();
+	if (!pc)
+		return;
+	v->const_unref_ptr = pc;
+	bpf_kfunc_call_test_release(pc);
+	v->const_unref_ptr = v->const_ref_ptr;
 }
 
 static void test_kptr_get(struct map_value *v)
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
index 05e209b1b12a..a1c4209a09e4 100644
--- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -9,6 +9,8 @@  struct map_value {
 	struct prog_test_ref_kfunc __kptr *unref_ptr;
 	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
 	struct prog_test_member __kptr_ref *ref_memb_ptr;
+	const struct prog_test_ref_kfunc __kptr *const_unref_ptr;
+	const struct prog_test_ref_kfunc __kptr_ref *const_ref_ptr;
 };
 
 struct array_map {
@@ -19,6 +21,7 @@  struct array_map {
 } array_map SEC(".maps");
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern const struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire_const(void) __ksym;
 extern struct prog_test_ref_kfunc *
 bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 
@@ -415,4 +418,115 @@  int kptr_get_ref_state(struct __sk_buff *ctx)
 	return 0;
 }
 
+SEC("?tc")
+int kptr_const_to_non_const(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+	if (!p)
+		return 0;
+
+	v->unref_ptr = (void *)p;
+	return 0;
+}
+
+SEC("?tc")
+int kptr_const_to_non_const_xchg(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+	if (!p)
+		return 0;
+
+	bpf_kptr_xchg(&v->ref_ptr, p);
+	return 0;
+}
+
+SEC("?tc")
+int kptr_const_or_null_to_non_const_xchg(struct __sk_buff *ctx)
+{
+	const struct prog_test_ref_kfunc *p;
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	p = bpf_kfunc_call_test_acquire_const();
+
+	bpf_kptr_xchg(&v->ref_ptr, p);
+	return 0;
+}
+
+SEC("?tc")
+int mark_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(v->const_unref_ptr);
+	return 0;
+}
+
+SEC("?tc")
+int mark_ref_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(v->const_ref_ptr);
+	return 0;
+}
+
+SEC("?tc")
+int mark_xchg_rdonly(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_this_cpu_ptr(bpf_kptr_xchg(&v->const_ref_ptr, NULL));
+	return 0;
+}
+
+SEC("?tc")
+int kptr_get_no_const(struct __sk_buff *ctx)
+{
+	struct map_value *v;
+	int key = 0;
+
+	v = bpf_map_lookup_elem(&array_map, &key);
+	if (!v)
+		return 0;
+
+	bpf_kfunc_call_test_kptr_get((void *)&v->const_ref_ptr, 0, 0);
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";