diff mbox series

[v2,bpf-next,07/10] selftests/bpf: validate zero preservation for sub-slot loads

Message ID 20231121002221.3687787-8-andrii@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Complete BPF verifier precision tracking support for register spills | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for s390x-gcc / test
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-8 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-10 fail Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for x86_64-gcc / test
bpf/vmtest-bpf-next-VM_Test-12 success Logs for x86_64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for x86_64-llvm-16 / build / build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-llvm-16 / test
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-llvm-16 / veristat
netdev/series_format success Posting correctly formatted
netdev/codegen success Generated files up to date
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/cc_maintainers warning 13 maintainers not CCed: mykolal@fb.com maxim@isovalent.com yonghong.song@linux.dev john.fastabend@gmail.com eddyz87@gmail.com sdf@google.com haoluo@google.com linux-kselftest@vger.kernel.org martin.lau@linux.dev jolsa@kernel.org kpsingh@kernel.org shuah@kernel.org song@kernel.org
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning CHECK: Lines should not end with a '('
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Andrii Nakryiko Nov. 21, 2023, 12:22 a.m. UTC
Validate that 1-, 2-, and 4-byte loads from stack slots not aligned on
8-byte boundary still preserve zero, when loading from all-STACK_ZERO
sub-slots, or when stack sub-slots are covered by spilled register with
known constant zero value.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/progs/verifier_spill_fill.c | 62 +++++++++++++++++++
 1 file changed, 62 insertions(+)

Comments

Eduard Zingerman Nov. 21, 2023, 4:20 p.m. UTC | #1
On Mon, 2023-11-20 at 16:22 -0800, Andrii Nakryiko wrote:
[...]

> +SEC("raw_tp")
> +__log_level(2)
> +__success
> +__naked void partial_stack_load_preserves_zeros(void)
> +{
> +	asm volatile (
> +		/* fp-8 is all STACK_ZERO */
> +		"*(u64 *)(r10 -8) = 0;"

This fails when compiled with llvm-16; BPF ST assembly support is only
present in llvm-18. If we want to preserve support for llvm-16, this
test would require ifdefs or the following patch:

@@ -3,6 +3,7 @@
 
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
 #include "bpf_misc.h"
 
 struct {
@@ -510,7 +511,7 @@ __naked void partial_stack_load_preserves_zeros(void)
 {
        asm volatile (
                /* fp-8 is all STACK_ZERO */
-               "*(u64 *)(r10 -8) = 0;"
+               ".8byte %[fp8_st_zero];"
 
                /* fp-16 is const zero register */
                "r0 = 0;"
@@ -559,7 +560,8 @@ __naked void partial_stack_load_preserves_zeros(void)
                "r0 = 0;"
                "exit;"
        :
-       : __imm_ptr(single_byte_buf)
+       : __imm_ptr(single_byte_buf),
+         __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
        : __clobber_common);
 }


> +		/* fp-16 is const zero register */
> +		"r0 = 0;"
> +		"*(u64 *)(r10 -16) = r0;"
> +
> +		/* load single U8 from non-aligned STACK_ZERO slot */
> +		"r1 = %[single_byte_buf];"
> +		"r2 = *(u8 *)(r10 -1);"
> +		"r1 += r2;" /* this should be fine */

Question: the comment suggests that adding something other than
          zero would be an error; however, an error would only be
          reported if *r1 is attempted. Maybe add such an access?
          E.g. "*(u8 *)(r1 + 0) = r2;"?

[...]
Andrii Nakryiko Nov. 21, 2023, 6:15 p.m. UTC | #2
On Tue, Nov 21, 2023 at 8:20 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Mon, 2023-11-20 at 16:22 -0800, Andrii Nakryiko wrote:
> [...]
>
> > +SEC("raw_tp")
> > +__log_level(2)
> > +__success
> > +__naked void partial_stack_load_preserves_zeros(void)
> > +{
> > +     asm volatile (
> > +             /* fp-8 is all STACK_ZERO */
> > +             "*(u64 *)(r10 -8) = 0;"
>
> This fails when compiled with llvm-16, bpf st assembly support is only
> present in llvm-18. If we want to preserve support for llvm-16 this
> test would require ifdefs or the following patch:

Ok, I'll use that, thanks! I need BPF_ST here to avoid register spill code path.


>
> @@ -3,6 +3,7 @@
>
>  #include <linux/bpf.h>
>  #include <bpf/bpf_helpers.h>
> +#include "../../../include/linux/filter.h"
>  #include "bpf_misc.h"
>
>  struct {
> @@ -510,7 +511,7 @@ __naked void partial_stack_load_preserves_zeros(void)
>  {
>         asm volatile (
>                 /* fp-8 is all STACK_ZERO */
> -               "*(u64 *)(r10 -8) = 0;"
> +               ".8byte %[fp8_st_zero];"
>
>                 /* fp-16 is const zero register */
>                 "r0 = 0;"
> @@ -559,7 +560,8 @@ __naked void partial_stack_load_preserves_zeros(void)
>                 "r0 = 0;"
>                 "exit;"
>         :
> -       : __imm_ptr(single_byte_buf)
> +       : __imm_ptr(single_byte_buf),
> +         __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
>         : __clobber_common);
>  }
>
>
> > +             /* fp-16 is const zero register */
> > +             "r0 = 0;"
> > +             "*(u64 *)(r10 -16) = r0;"
> > +
> > +             /* load single U8 from non-aligned STACK_ZERO slot */
> > +             "r1 = %[single_byte_buf];"
> > +             "r2 = *(u8 *)(r10 -1);"
> > +             "r1 += r2;" /* this should be fine */
>
> Question: the comment suggests that adding something other than
>           zero would be an error, however error would only be
>           reported if *r1 is attempted, maybe add such access?
>           E.g. "*(u8 *)(r1 + 0) = r2;"?

You are right! I assumed we check bounds during pointer increment, but
I'm wrong. I added a dereference instruction everywhere, thanks!

>
> [...]
>
>
>
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 899a74ac9093..60ad2e0247b4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -486,4 +486,66 @@  __naked void spill_subregs_preserve_stack_zero(void)
 	: __clobber_all);
 }
 
+char single_byte_buf[1] SEC(".data.single_byte_buf");
+
+SEC("raw_tp")
+__log_level(2)
+__success
+__naked void partial_stack_load_preserves_zeros(void)
+{
+	asm volatile (
+		/* fp-8 is all STACK_ZERO */
+		"*(u64 *)(r10 -8) = 0;"
+
+		/* fp-16 is const zero register */
+		"r0 = 0;"
+		"*(u64 *)(r10 -16) = r0;"
+
+		/* load single U8 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u8 *)(r10 -1);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U8 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u8 *)(r10 -9);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U16 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u16 *)(r10 -2);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U16 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u16 *)(r10 -10);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U32 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u32 *)(r10 -4);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U32 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u32 *)(r10 -12);"
+		"r1 += r2;" /* this should be fine */
+
+		/* for completeness, load U64 from STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u64 *)(r10 -8);"
+		"r1 += r2;" /* this should be fine */
+
+		/* for completeness, load U64 from ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u64 *)(r10 -16);"
+		"r1 += r2;" /* this should be fine */
+
+		"r0 = 0;"
+		"exit;"
+	:
+	: __imm_ptr(single_byte_buf)
+	: __clobber_common);
+}
+
 char _license[] SEC("license") = "GPL";