Message ID | 20240627090900.20017-10-iii@linux.ibm.com (mailing list archive)
---|---
State | Superseded |
Delegated to: | BPF |
Series | s390/bpf: Implement arena
On Thu, Jun 27, 2024 at 2:09 AM Ilya Leoshkevich <iii@linux.ibm.com> wrote:
>
> Check that __sync_*() functions don't cause kernel panics when handling
> freed arena pages.
>
> Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> ---
>  .../selftests/bpf/prog_tests/arena_atomics.c | 16 +++++++
>  .../selftests/bpf/progs/arena_atomics.c      | 43 +++++++++++++++++++
>  2 files changed, 59 insertions(+)
>
> [...]
>
> +SEC("syscall")
> +int uaf(const void *ctx)
> +{
> +	if (pid != (bpf_get_current_pid_tgid() >> 32))
> +		return 0;
> +#ifdef ENABLE_ATOMICS_TESTS
> +	void __arena *page;
> +
> +	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
> +	bpf_arena_free_pages(&arena, page, 1);
> +
> +	__sync_fetch_and_add((__u32 __arena *)page, 1);
> [...]
> +	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
> +	__sync_lock_test_and_set((__u64 __arena *)page, 1);
> +#endif

Needs to be gated to exclude x86.
Not sure about arm64.
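A minimal sketch of what that gating could look like, assuming the selftests' usual -D__TARGET_ARCH_* defines are visible to this program and reusing the patch's own `arena` and `pid` globals; the exact set of excluded architectures, arm64 included, is the open question above, so this is illustration rather than the eventual fix:

SEC("syscall")
int uaf(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
/* Sketch: skip the freed-page accesses on x86, per the literal review
 * comment; whether arm64 needs the same treatment is undecided here.
 */
#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_x86)
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);

	__sync_fetch_and_add((__u32 __arena *)page, 1);
	/* ... remaining __sync_*() calls as in the patch ... */
#endif
	return 0;
}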
On Thu, 2024-06-27 at 17:45 -0700, Alexei Starovoitov wrote:
> On Thu, Jun 27, 2024 at 2:09 AM Ilya Leoshkevich <iii@linux.ibm.com>
> wrote:
> >
> > Check that __sync_*() functions don't cause kernel panics when
> > handling freed arena pages.
> >
> > Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> >
> > [...]
> >
> > +	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
> > +	__sync_lock_test_and_set((__u64 __arena *)page, 1);
> > +#endif
>
> Needs to be gated to exclude x86.
> Not sure about arm64.

I ran this test on x86, and it passed. But you are right, as you
mentioned in the other mail, x86 does not support certain atomics.
This means that the test is not testing what I want it to test.
I'll have to debug this.
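One way to check what the compiled program actually contains (a debugging suggestion, not something proposed in the thread) is to disassemble the BPF object with `llvm-objdump -d arena_atomics.bpf.o` and inspect the `uaf` function, or to dump the loaded program with `bpftool prog dump xlated` / `bpftool prog dump jited`, and confirm whether the atomic accesses to the freed page are really emitted on x86.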
Hi Ilya,
kernel test robot noticed the following build errors:
[auto build test ERROR on bpf-next/master]
url: https://github.com/intel-lab-lkp/linux/commits/Ilya-Leoshkevich/s390-bpf-Factor-out-emitting-probe-nops/20240628-021211
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link: https://lore.kernel.org/r/20240627090900.20017-10-iii%40linux.ibm.com
patch subject: [PATCH bpf-next 09/10] selftests/bpf: Add UAF tests for arena atomics
:::::: branch date: 3 days ago
:::::: commit date: 3 days ago
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build):
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/r/202407010539.7cpWa4x1-lkp@intel.com/
All errors (new ones prefixed by >>):
>> progs/arena_atomics.c:186:7: error: 'section' attribute only applies to functions, global variables, Objective-C methods, and Objective-C properties
186 | void __arena *page;
| ^
progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
32 | #define __arena SEC(".addr_space.1")
| ^
/tools/include/bpf/bpf_helpers.h:40:17: note: expanded from macro 'SEC'
40 | __attribute__((section(name), used)) \
| ^
1 error generated.
vim +/section +186 tools/testing/selftests/bpf/progs/arena_atomics.c
0281323c566bad Ilya Leoshkevich 2024-06-27 179
0281323c566bad Ilya Leoshkevich 2024-06-27 180 SEC("syscall")
0281323c566bad Ilya Leoshkevich 2024-06-27 181 int uaf(const void *ctx)
0281323c566bad Ilya Leoshkevich 2024-06-27 182 {
0281323c566bad Ilya Leoshkevich 2024-06-27 183 if (pid != (bpf_get_current_pid_tgid() >> 32))
0281323c566bad Ilya Leoshkevich 2024-06-27 184 return 0;
0281323c566bad Ilya Leoshkevich 2024-06-27 185 #ifdef ENABLE_ATOMICS_TESTS
0281323c566bad Ilya Leoshkevich 2024-06-27 @186 void __arena *page;
0281323c566bad Ilya Leoshkevich 2024-06-27 187
0281323c566bad Ilya Leoshkevich 2024-06-27 188 page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
0281323c566bad Ilya Leoshkevich 2024-06-27 189 bpf_arena_free_pages(&arena, page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 190
0281323c566bad Ilya Leoshkevich 2024-06-27 191 __sync_fetch_and_add((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 192 __sync_add_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 193 __sync_fetch_and_sub((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 194 __sync_sub_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 195 __sync_fetch_and_and((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 196 __sync_and_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 197 __sync_fetch_and_or((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 198 __sync_or_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 199 __sync_fetch_and_xor((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 200 __sync_xor_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 201 __sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 202 __sync_lock_test_and_set((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 203
0281323c566bad Ilya Leoshkevich 2024-06-27 204 __sync_fetch_and_add((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 205 __sync_add_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 206 __sync_fetch_and_sub((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 207 __sync_sub_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 208 __sync_fetch_and_and((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 209 __sync_and_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 210 __sync_fetch_and_or((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 211 __sync_or_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 212 __sync_fetch_and_xor((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 213 __sync_xor_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 214 __sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 215 __sync_lock_test_and_set((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27 216 #endif
0281323c566bad Ilya Leoshkevich 2024-06-27 217
0281323c566bad Ilya Leoshkevich 2024-06-27 218 return 0;
0281323c566bad Ilya Leoshkevich 2024-06-27 219 }
0281323c566bad Ilya Leoshkevich 2024-06-27 220
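The error itself is mechanical: on this clang 18 build, `__arena` falls back to the `#define __arena SEC(".addr_space.1")` shown in the expansion notes above, and a section attribute cannot be applied to a local variable. One conceivable guard, modeled on the `__BPF_FEATURE_ADDR_SPACE_CAST` macro that newer clang versions define for the BPF target; this is an assumption for illustration, not the fix that was actually posted:

#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	/* With address-space-cast support, __arena expands to
	 * __attribute__((address_space(1))), which is valid on a local
	 * variable, unlike the SEC(".addr_space.1") fallback.
	 */
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	/* ... __sync_*() calls as in the patch ... */
#endif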
Check that __sync_*() functions don't cause kernel panics when handling
freed arena pages.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
 .../selftests/bpf/prog_tests/arena_atomics.c | 16 +++++++
 .../selftests/bpf/progs/arena_atomics.c      | 43 +++++++++++++++++++
 2 files changed, 59 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
index 0807a48a58ee..38eef4cc5c80 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -146,6 +146,20 @@ static void test_xchg(struct arena_atomics *skel)
 	ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
 }
 
+static void test_uaf(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.uaf);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+}
+
 void test_arena_atomics(void)
 {
 	struct arena_atomics *skel;
@@ -180,6 +194,8 @@ void test_arena_atomics(void)
 		test_cmpxchg(skel);
 	if (test__start_subtest("xchg"))
 		test_xchg(skel);
+	if (test__start_subtest("uaf"))
+		test_uaf(skel);
 
 cleanup:
 	arena_atomics__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index 55f10563208d..a86c8cdf1a30 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -176,3 +176,46 @@ int xchg(const void *ctx)
 
 	return 0;
 }
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+	void __arena *page;
+
+	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+	bpf_arena_free_pages(&arena, page, 1);
+
+	__sync_fetch_and_add((__u32 __arena *)page, 1);
+	__sync_add_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_sub((__u32 __arena *)page, 1);
+	__sync_sub_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_and((__u32 __arena *)page, 1);
+	__sync_and_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_or((__u32 __arena *)page, 1);
+	__sync_or_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_xor((__u32 __arena *)page, 1);
+	__sync_xor_and_fetch((__u32 __arena *)page, 1);
+	__sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
+	__sync_lock_test_and_set((__u32 __arena *)page, 1);
+
+	__sync_fetch_and_add((__u64 __arena *)page, 1);
+	__sync_add_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_sub((__u64 __arena *)page, 1);
+	__sync_sub_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_and((__u64 __arena *)page, 1);
+	__sync_and_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_or((__u64 __arena *)page, 1);
+	__sync_or_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_xor((__u64 __arena *)page, 1);
+	__sync_xor_and_fetch((__u64 __arena *)page, 1);
+	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
+	__sync_lock_test_and_set((__u64 __arena *)page, 1);
+#endif
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
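Once the program builds, the new subtest can be exercised on its own through the standard selftests harness filter syntax, e.g. `./test_progs -t arena_atomics/uaf` from tools/testing/selftests/bpf.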