
[bpf-next,09/10] selftests/bpf: Add UAF tests for arena atomics

Message ID: 20240627090900.20017-10-iii@linux.ibm.com
State: Superseded
Delegated to: BPF
Series: s390/bpf: Implement arena

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-gcc / test
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-llvm-17 / test
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-llvm-18 / test
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-PR fail PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 12 maintainers not CCed: yonghong.song@linux.dev mykolal@fb.com haoluo@google.com jolsa@kernel.org shuah@kernel.org song@kernel.org john.fastabend@gmail.com eddyz87@gmail.com kpsingh@kernel.org linux-kselftest@vger.kernel.org martin.lau@linux.dev sdf@google.com
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning CHECK: spaces preferred around that '*' (ctx:WxV)
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Ilya Leoshkevich June 27, 2024, 9:07 a.m. UTC
Check that __sync_*() functions don't cause kernel panics when handling
freed arena pages.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
 .../selftests/bpf/prog_tests/arena_atomics.c  | 16 +++++++
 .../selftests/bpf/progs/arena_atomics.c       | 43 +++++++++++++++++++
 2 files changed, 59 insertions(+)

Comments

Alexei Starovoitov June 28, 2024, 12:45 a.m. UTC | #1
On Thu, Jun 27, 2024 at 2:09 AM Ilya Leoshkevich <iii@linux.ibm.com> wrote:
>
> Check that __sync_*() functions don't cause kernel panics when handling
> freed arena pages.
>
> Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> ---
>  .../selftests/bpf/prog_tests/arena_atomics.c  | 16 +++++++
>  .../selftests/bpf/progs/arena_atomics.c       | 43 +++++++++++++++++++
>  2 files changed, 59 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> index 0807a48a58ee..38eef4cc5c80 100644
> --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> @@ -146,6 +146,20 @@ static void test_xchg(struct arena_atomics *skel)
>         ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
>  }
>
> +static void test_uaf(struct arena_atomics *skel)
> +{
> +       LIBBPF_OPTS(bpf_test_run_opts, topts);
> +       int err, prog_fd;
> +
> +       /* No need to attach it, just run it directly */
> +       prog_fd = bpf_program__fd(skel->progs.uaf);
> +       err = bpf_prog_test_run_opts(prog_fd, &topts);
> +       if (!ASSERT_OK(err, "test_run_opts err"))
> +               return;
> +       if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
> +               return;
> +}
> +
>  void test_arena_atomics(void)
>  {
>         struct arena_atomics *skel;
> @@ -180,6 +194,8 @@ void test_arena_atomics(void)
>                 test_cmpxchg(skel);
>         if (test__start_subtest("xchg"))
>                 test_xchg(skel);
> +       if (test__start_subtest("uaf"))
> +               test_uaf(skel);
>
>  cleanup:
>         arena_atomics__destroy(skel);
> diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
> index 55f10563208d..a86c8cdf1a30 100644
> --- a/tools/testing/selftests/bpf/progs/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
> @@ -176,3 +176,46 @@ int xchg(const void *ctx)
>
>         return 0;
>  }
> +
> +SEC("syscall")
> +int uaf(const void *ctx)
> +{
> +       if (pid != (bpf_get_current_pid_tgid() >> 32))
> +               return 0;
> +#ifdef ENABLE_ATOMICS_TESTS
> +       void __arena *page;
> +
> +       page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
> +       bpf_arena_free_pages(&arena, page, 1);
> +
> +       __sync_fetch_and_add((__u32 __arena *)page, 1);
> +       __sync_add_and_fetch((__u32 __arena *)page, 1);
> +       __sync_fetch_and_sub((__u32 __arena *)page, 1);
> +       __sync_sub_and_fetch((__u32 __arena *)page, 1);
> +       __sync_fetch_and_and((__u32 __arena *)page, 1);
> +       __sync_and_and_fetch((__u32 __arena *)page, 1);
> +       __sync_fetch_and_or((__u32 __arena *)page, 1);
> +       __sync_or_and_fetch((__u32 __arena *)page, 1);
> +       __sync_fetch_and_xor((__u32 __arena *)page, 1);
> +       __sync_xor_and_fetch((__u32 __arena *)page, 1);
> +       __sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
> +       __sync_lock_test_and_set((__u32 __arena *)page, 1);
> +
> +       __sync_fetch_and_add((__u64 __arena *)page, 1);
> +       __sync_add_and_fetch((__u64 __arena *)page, 1);
> +       __sync_fetch_and_sub((__u64 __arena *)page, 1);
> +       __sync_sub_and_fetch((__u64 __arena *)page, 1);
> +       __sync_fetch_and_and((__u64 __arena *)page, 1);
> +       __sync_and_and_fetch((__u64 __arena *)page, 1);
> +       __sync_fetch_and_or((__u64 __arena *)page, 1);
> +       __sync_or_and_fetch((__u64 __arena *)page, 1);
> +       __sync_fetch_and_xor((__u64 __arena *)page, 1);
> +       __sync_xor_and_fetch((__u64 __arena *)page, 1);
> +       __sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
> +       __sync_lock_test_and_set((__u64 __arena *)page, 1);
> +#endif

Needs to be gated to exclude x86.
Not sure about arm64.
Ilya Leoshkevich June 28, 2024, 9:13 a.m. UTC | #2
On Thu, 2024-06-27 at 17:45 -0700, Alexei Starovoitov wrote:
> On Thu, Jun 27, 2024 at 2:09 AM Ilya Leoshkevich <iii@linux.ibm.com>
> wrote:
> > 
> > Check that __sync_*() functions don't cause kernel panics when
> > handling
> > freed arena pages.
> > 
> > Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> > ---
> >  .../selftests/bpf/prog_tests/arena_atomics.c  | 16 +++++++
> >  .../selftests/bpf/progs/arena_atomics.c       | 43
> > +++++++++++++++++++
> >  2 files changed, 59 insertions(+)
> > 
> > diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > index 0807a48a58ee..38eef4cc5c80 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > @@ -146,6 +146,20 @@ static void test_xchg(struct arena_atomics
> > *skel)
> >         ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
> >  }
> > 
> > +static void test_uaf(struct arena_atomics *skel)
> > +{
> > +       LIBBPF_OPTS(bpf_test_run_opts, topts);
> > +       int err, prog_fd;
> > +
> > +       /* No need to attach it, just run it directly */
> > +       prog_fd = bpf_program__fd(skel->progs.uaf);
> > +       err = bpf_prog_test_run_opts(prog_fd, &topts);
> > +       if (!ASSERT_OK(err, "test_run_opts err"))
> > +               return;
> > +       if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
> > +               return;
> > +}
> > +
> >  void test_arena_atomics(void)
> >  {
> >         struct arena_atomics *skel;
> > @@ -180,6 +194,8 @@ void test_arena_atomics(void)
> >                 test_cmpxchg(skel);
> >         if (test__start_subtest("xchg"))
> >                 test_xchg(skel);
> > +       if (test__start_subtest("uaf"))
> > +               test_uaf(skel);
> > 
> >  cleanup:
> >         arena_atomics__destroy(skel);
> > diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c
> > b/tools/testing/selftests/bpf/progs/arena_atomics.c
> > index 55f10563208d..a86c8cdf1a30 100644
> > --- a/tools/testing/selftests/bpf/progs/arena_atomics.c
> > +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
> > @@ -176,3 +176,46 @@ int xchg(const void *ctx)
> > 
> >         return 0;
> >  }
> > +
> > +SEC("syscall")
> > +int uaf(const void *ctx)
> > +{
> > +       if (pid != (bpf_get_current_pid_tgid() >> 32))
> > +               return 0;
> > +#ifdef ENABLE_ATOMICS_TESTS
> > +       void __arena *page;
> > +
> > +       page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE,
> > 0);
> > +       bpf_arena_free_pages(&arena, page, 1);
> > +
> > +       __sync_fetch_and_add((__u32 __arena *)page, 1);
> > +       __sync_add_and_fetch((__u32 __arena *)page, 1);
> > +       __sync_fetch_and_sub((__u32 __arena *)page, 1);
> > +       __sync_sub_and_fetch((__u32 __arena *)page, 1);
> > +       __sync_fetch_and_and((__u32 __arena *)page, 1);
> > +       __sync_and_and_fetch((__u32 __arena *)page, 1);
> > +       __sync_fetch_and_or((__u32 __arena *)page, 1);
> > +       __sync_or_and_fetch((__u32 __arena *)page, 1);
> > +       __sync_fetch_and_xor((__u32 __arena *)page, 1);
> > +       __sync_xor_and_fetch((__u32 __arena *)page, 1);
> > +       __sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
> > +       __sync_lock_test_and_set((__u32 __arena *)page, 1);
> > +
> > +       __sync_fetch_and_add((__u64 __arena *)page, 1);
> > +       __sync_add_and_fetch((__u64 __arena *)page, 1);
> > +       __sync_fetch_and_sub((__u64 __arena *)page, 1);
> > +       __sync_sub_and_fetch((__u64 __arena *)page, 1);
> > +       __sync_fetch_and_and((__u64 __arena *)page, 1);
> > +       __sync_and_and_fetch((__u64 __arena *)page, 1);
> > +       __sync_fetch_and_or((__u64 __arena *)page, 1);
> > +       __sync_or_and_fetch((__u64 __arena *)page, 1);
> > +       __sync_fetch_and_xor((__u64 __arena *)page, 1);
> > +       __sync_xor_and_fetch((__u64 __arena *)page, 1);
> > +       __sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
> > +       __sync_lock_test_and_set((__u64 __arena *)page, 1);
> > +#endif
> 
> Needs to be gated to exclude x86.
> Not sure about arm64.

I ran this test on x86, and it passed. But you are right, as you
mentioned in the other mail, x86 does not support certain atomics.
This means that the test is not testing what I want it to test. I'll
have to debug this.
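
For illustration, one shape the gating discussed above could take is an architecture check around the UAF body. This is a minimal sketch, assuming the __TARGET_ARCH_* defines that the selftests build already passes to clang; the exclusion list mirrors Alexei's suggestion and is hypothetical, not necessarily the guard that was eventually applied:

#ifdef ENABLE_ATOMICS_TESTS
/* Hypothetical gate: skip the freed-page atomics on x86, where, per the
 * discussion above, not all of these __sync_*() forms are supported.
 */
#if !defined(__TARGET_ARCH_x86)
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);

	__sync_fetch_and_add((__u32 __arena *)page, 1);
	/* ... remaining 32-bit and 64-bit __sync_*() calls as in the patch ... */
#endif
#endif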
kernel test robot July 3, 2024, 2:10 a.m. UTC | #3
Hi Ilya,

kernel test robot noticed the following build errors:

[auto build test ERROR on bpf-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Ilya-Leoshkevich/s390-bpf-Factor-out-emitting-probe-nops/20240628-021211
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20240627090900.20017-10-iii%40linux.ibm.com
patch subject: [PATCH bpf-next 09/10] selftests/bpf: Add UAF tests for arena atomics
:::::: branch date: 3 days ago
:::::: commit date: 3 days ago
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build):

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/r/202407010539.7cpWa4x1-lkp@intel.com/

All errors (new ones prefixed by >>):

>> progs/arena_atomics.c:186:7: error: 'section' attribute only applies to functions, global variables, Objective-C methods, and Objective-C properties
     186 |         void __arena *page;
         |              ^
   progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
      32 | #define __arena SEC(".addr_space.1")
         |                 ^
   /tools/include/bpf/bpf_helpers.h:40:17: note: expanded from macro 'SEC'
      40 |         __attribute__((section(name), used))                                \
         |                        ^
   1 error generated.


vim +/section +186 tools/testing/selftests/bpf/progs/arena_atomics.c

0281323c566bad Ilya Leoshkevich 2024-06-27  179  
0281323c566bad Ilya Leoshkevich 2024-06-27  180  SEC("syscall")
0281323c566bad Ilya Leoshkevich 2024-06-27  181  int uaf(const void *ctx)
0281323c566bad Ilya Leoshkevich 2024-06-27  182  {
0281323c566bad Ilya Leoshkevich 2024-06-27  183  	if (pid != (bpf_get_current_pid_tgid() >> 32))
0281323c566bad Ilya Leoshkevich 2024-06-27  184  		return 0;
0281323c566bad Ilya Leoshkevich 2024-06-27  185  #ifdef ENABLE_ATOMICS_TESTS
0281323c566bad Ilya Leoshkevich 2024-06-27 @186  	void __arena *page;
0281323c566bad Ilya Leoshkevich 2024-06-27  187  
0281323c566bad Ilya Leoshkevich 2024-06-27  188  	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
0281323c566bad Ilya Leoshkevich 2024-06-27  189  	bpf_arena_free_pages(&arena, page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  190  
0281323c566bad Ilya Leoshkevich 2024-06-27  191  	__sync_fetch_and_add((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  192  	__sync_add_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  193  	__sync_fetch_and_sub((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  194  	__sync_sub_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  195  	__sync_fetch_and_and((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  196  	__sync_and_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  197  	__sync_fetch_and_or((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  198  	__sync_or_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  199  	__sync_fetch_and_xor((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  200  	__sync_xor_and_fetch((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  201  	__sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  202  	__sync_lock_test_and_set((__u32 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  203  
0281323c566bad Ilya Leoshkevich 2024-06-27  204  	__sync_fetch_and_add((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  205  	__sync_add_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  206  	__sync_fetch_and_sub((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  207  	__sync_sub_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  208  	__sync_fetch_and_and((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  209  	__sync_and_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  210  	__sync_fetch_and_or((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  211  	__sync_or_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  212  	__sync_fetch_and_xor((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  213  	__sync_xor_and_fetch((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  214  	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  215  	__sync_lock_test_and_set((__u64 __arena *)page, 1);
0281323c566bad Ilya Leoshkevich 2024-06-27  216  #endif
0281323c566bad Ilya Leoshkevich 2024-06-27  217  
0281323c566bad Ilya Leoshkevich 2024-06-27  218  	return 0;
0281323c566bad Ilya Leoshkevich 2024-06-27  219  }
0281323c566bad Ilya Leoshkevich 2024-06-27  220
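
The error above comes from the fallback definition of __arena: when the compiler does not model the arena as an address space, the macro expands to SEC(".addr_space.1"), and a section attribute cannot be applied to a local variable. A minimal sketch of one way around it, assuming the __BPF_FEATURE_ADDR_SPACE_CAST feature macro that the arena selftests use to choose between the address_space attribute and the SEC() fallback (a hypothetical guard, not necessarily the fix that was applied):

#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	/* Here __arena expands to an address_space(1) attribute, which is
	 * valid on a local variable, so the UAF body compiles.
	 */
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	__sync_fetch_and_add((__u32 __arena *)page, 1);
	/* ... remaining __sync_*() calls as in the patch ... */
#else
	/* Older compilers: keep the program loadable but do nothing. */
#endif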

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
index 0807a48a58ee..38eef4cc5c80 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -146,6 +146,20 @@  static void test_xchg(struct arena_atomics *skel)
 	ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
 }
 
+static void test_uaf(struct arena_atomics *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	/* No need to attach it, just run it directly */
+	prog_fd = bpf_program__fd(skel->progs.uaf);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+}
+
 void test_arena_atomics(void)
 {
 	struct arena_atomics *skel;
@@ -180,6 +194,8 @@  void test_arena_atomics(void)
 		test_cmpxchg(skel);
 	if (test__start_subtest("xchg"))
 		test_xchg(skel);
+	if (test__start_subtest("uaf"))
+		test_uaf(skel);
 
 cleanup:
 	arena_atomics__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index 55f10563208d..a86c8cdf1a30 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -176,3 +176,46 @@  int xchg(const void *ctx)
 
 	return 0;
 }
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+	if (pid != (bpf_get_current_pid_tgid() >> 32))
+		return 0;
+#ifdef ENABLE_ATOMICS_TESTS
+	void __arena *page;
+
+	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+	bpf_arena_free_pages(&arena, page, 1);
+
+	__sync_fetch_and_add((__u32 __arena *)page, 1);
+	__sync_add_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_sub((__u32 __arena *)page, 1);
+	__sync_sub_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_and((__u32 __arena *)page, 1);
+	__sync_and_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_or((__u32 __arena *)page, 1);
+	__sync_or_and_fetch((__u32 __arena *)page, 1);
+	__sync_fetch_and_xor((__u32 __arena *)page, 1);
+	__sync_xor_and_fetch((__u32 __arena *)page, 1);
+	__sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
+	__sync_lock_test_and_set((__u32 __arena *)page, 1);
+
+	__sync_fetch_and_add((__u64 __arena *)page, 1);
+	__sync_add_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_sub((__u64 __arena *)page, 1);
+	__sync_sub_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_and((__u64 __arena *)page, 1);
+	__sync_and_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_or((__u64 __arena *)page, 1);
+	__sync_or_and_fetch((__u64 __arena *)page, 1);
+	__sync_fetch_and_xor((__u64 __arena *)page, 1);
+	__sync_xor_and_fetch((__u64 __arena *)page, 1);
+	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
+	__sync_lock_test_and_set((__u64 __arena *)page, 1);
+#endif
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
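
Once the series builds, the new subtest can be exercised on its own with the usual test_progs subtest selector, for example:

  cd tools/testing/selftests/bpf
  ./test_progs -t arena_atomics/uaf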