Message ID | 20231121002221.3687787-8-andrii@kernel.org (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | Complete BPF verifier precision tracking support for register spills |
On Mon, 2023-11-20 at 16:22 -0800, Andrii Nakryiko wrote:
[...]

> +SEC("raw_tp")
> +__log_level(2)
> +__success
> +__naked void partial_stack_load_preserves_zeros(void)
> +{
> +	asm volatile (
> +		/* fp-8 is all STACK_ZERO */
> +		"*(u64 *)(r10 -8) = 0;"

This fails when compiled with llvm-16, bpf st assembly support is only
present in llvm-18. If we want to preserve support for llvm-16 this
test would require ifdefs or the following patch:

@@ -3,6 +3,7 @@
 
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
+#include "../../../include/linux/filter.h"
 #include "bpf_misc.h"
 
 struct {
@@ -510,7 +511,7 @@ __naked void partial_stack_load_preserves_zeros(void)
 {
 	asm volatile (
 		/* fp-8 is all STACK_ZERO */
-		"*(u64 *)(r10 -8) = 0;"
+		".8byte %[fp8_st_zero];"
 
 		/* fp-16 is const zero register */
 		"r0 = 0;"
@@ -559,7 +560,8 @@ __naked void partial_stack_load_preserves_zeros(void)
 		"r0 = 0;"
 		"exit;"
 		:
-		: __imm_ptr(single_byte_buf)
+		: __imm_ptr(single_byte_buf),
+		  __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
 		: __clobber_common);
 }

> +	/* fp-16 is const zero register */
> +	"r0 = 0;"
> +	"*(u64 *)(r10 -16) = r0;"
> +
> +	/* load single U8 from non-aligned STACK_ZERO slot */
> +	"r1 = %[single_byte_buf];"
> +	"r2 = *(u8 *)(r10 -1);"
> +	"r1 += r2;" /* this should be fine */

Question: the comment suggests that adding something other than
          zero would be an error, however error would only be
          reported if *r1 is attempted, maybe add such access?
          E.g. "*(u8 *)(r1 + 0) = r2;"?

[...]
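For comparison, a minimal sketch of the ifdef route mentioned above. The `__clang_major__ >= 18` gate and the `FP8_STORE_ZERO` macro name are illustrative assumptions, not part of the posted patch; note that the fallback spills a register, which is precisely the code path this test wants to avoid, so the `__imm_insn()`/`BPF_ST_MEM()` variant shown in the diff is the better fit:

```c
/* Hypothetical sketch: only llvm-18+ can assemble the BPF_ST form of the
 * store, so gate it on the compiler version and fall back to a register
 * spill otherwise. The fallback changes what the test exercises (it goes
 * through the spill path instead of BPF_ST), which is why it is not used
 * in the actual patch.
 */
#if defined(__clang__) && __clang_major__ >= 18
#define FP8_STORE_ZERO "*(u64 *)(r10 -8) = 0;"
#else
#define FP8_STORE_ZERO "r0 = 0; *(u64 *)(r10 -8) = r0;"
#endif
```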
On Tue, Nov 21, 2023 at 8:20 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Mon, 2023-11-20 at 16:22 -0800, Andrii Nakryiko wrote:
> [...]
>
> > +SEC("raw_tp")
> > +__log_level(2)
> > +__success
> > +__naked void partial_stack_load_preserves_zeros(void)
> > +{
> > +	asm volatile (
> > +		/* fp-8 is all STACK_ZERO */
> > +		"*(u64 *)(r10 -8) = 0;"
>
> This fails when compiled with llvm-16, bpf st assembly support is only
> present in llvm-18. If we want to preserve support for llvm-16 this
> test would require ifdefs or the following patch:

Ok, I'll use that, thanks! I need BPF_ST here to avoid register spill
code path.

>
> @@ -3,6 +3,7 @@
>
>  #include <linux/bpf.h>
>  #include <bpf/bpf_helpers.h>
> +#include "../../../include/linux/filter.h"
>  #include "bpf_misc.h"
>
>  struct {
> @@ -510,7 +511,7 @@ __naked void partial_stack_load_preserves_zeros(void)
>  {
>  	asm volatile (
>  		/* fp-8 is all STACK_ZERO */
> -		"*(u64 *)(r10 -8) = 0;"
> +		".8byte %[fp8_st_zero];"
>
>  		/* fp-16 is const zero register */
>  		"r0 = 0;"
> @@ -559,7 +560,8 @@ __naked void partial_stack_load_preserves_zeros(void)
>  		"r0 = 0;"
>  		"exit;"
>  		:
> -		: __imm_ptr(single_byte_buf)
> +		: __imm_ptr(single_byte_buf),
> +		  __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
> 		: __clobber_common);
>  }
>
> > +	/* fp-16 is const zero register */
> > +	"r0 = 0;"
> > +	"*(u64 *)(r10 -16) = r0;"
> > +
> > +	/* load single U8 from non-aligned STACK_ZERO slot */
> > +	"r1 = %[single_byte_buf];"
> > +	"r2 = *(u8 *)(r10 -1);"
> > +	"r1 += r2;" /* this should be fine */
>
> Question: the comment suggests that adding something other than
>           zero would be an error, however error would only be
>           reported if *r1 is attempted, maybe add such access?
>           E.g. "*(u8 *)(r1 + 0) = r2;"?

you are right! I assumed we check bounds during pointer increment, but
I'm wrong. I added dereference instruction everywhere, thanks!

>
> [...]
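For context, a sketch of how one of the test's load blocks looks with the extra dereference suggested above; the exact placement in the final revision is an assumption, but the added store is the instruction quoted in the review:

```c
	/* load single U8 from non-aligned STACK_ZERO slot */
	"r1 = %[single_byte_buf];"
	"r2 = *(u8 *)(r10 -1);"
	"r1 += r2;"             /* pointer arithmetic alone is not rejected */
	"*(u8 *)(r1 + 0) = r2;" /* bounds are checked at the dereference, so
				 * this store into the 1-byte buffer only
				 * verifies if r2 is known to be zero */
```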
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 899a74ac9093..60ad2e0247b4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -486,4 +486,66 @@ __naked void spill_subregs_preserve_stack_zero(void)
 		: __clobber_all);
 }
 
+char single_byte_buf[1] SEC(".data.single_byte_buf");
+
+SEC("raw_tp")
+__log_level(2)
+__success
+__naked void partial_stack_load_preserves_zeros(void)
+{
+	asm volatile (
+		/* fp-8 is all STACK_ZERO */
+		"*(u64 *)(r10 -8) = 0;"
+
+		/* fp-16 is const zero register */
+		"r0 = 0;"
+		"*(u64 *)(r10 -16) = r0;"
+
+		/* load single U8 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u8 *)(r10 -1);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U8 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u8 *)(r10 -9);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U16 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u16 *)(r10 -2);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U16 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u16 *)(r10 -10);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U32 from non-aligned STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u32 *)(r10 -4);"
+		"r1 += r2;" /* this should be fine */
+
+		/* load single U32 from non-aligned ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u32 *)(r10 -12);"
+		"r1 += r2;" /* this should be fine */
+
+		/* for completeness, load U64 from STACK_ZERO slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u64 *)(r10 -8);"
+		"r1 += r2;" /* this should be fine */
+
+		/* for completeness, load U64 from ZERO REG slot */
+		"r1 = %[single_byte_buf];"
+		"r2 = *(u64 *)(r10 -16);"
+		"r1 += r2;" /* this should be fine */
+
+		"r0 = 0;"
+		"exit;"
+		:
+		: __imm_ptr(single_byte_buf)
+		: __clobber_common);
+}
+
 char _license[] SEC("license") = "GPL";
Validate that 1-, 2-, and 4-byte loads from stack slots not aligned on
8-byte boundary still preserve zero, when loading from all-STACK_ZERO
sub-slots, or when stack sub-slots are covered by spilled register with
known constant zero value.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/progs/verifier_spill_fill.c | 62 +++++++++++++++++++
 1 file changed, 62 insertions(+)