[bpf-next,v2,15/15] selftests/bpf: states pruning checks for scalar vs STACK_{MISC,ZERO}

Message ID: 20240108205209.838365-16-maxtram95@gmail.com
State: New
Series: Improvements for tracking scalars in the BPF verifier

Commit Message

Maxim Mikityanskiy Jan. 8, 2024, 8:52 p.m. UTC
From: Eduard Zingerman <eddyz87@gmail.com>

Check that stacksafe() considers the following old vs cur stack spill
state combinations equivalent:
- spill of unbound scalar vs combination of STACK_{MISC,ZERO,INVALID}
- STACK_MISC vs spill of unbound scalar
- spill of scalar 0 vs STACK_ZERO
- STACK_ZERO vs spill of scalar 0

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 .../selftests/bpf/progs/verifier_spill_fill.c | 192 ++++++++++++++++++
 1 file changed, 192 insertions(+)
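
For reference, each rule compares an "old" (checkpointed) stack slot
against the "cur" (current) slot. Below is a minimal sketch of that
comparison -- illustrative pseudocode only: struct slot and the is_*()
helpers are hypothetical names, not kernel APIs; the real logic lives
in stacksafe() in kernel/bpf/verifier.c:

	/* Hypothetical sketch of the slot rules these tests exercise. */
	static bool slot_equivalent(struct slot *old, struct slot *cur)
	{
		/* An old spill of an unbound scalar assumed nothing about
		 * the slot, so any cur contents (spill, STACK_MISC,
		 * STACK_ZERO, STACK_INVALID) are safe.
		 */
		if (is_spilled_unbound_scalar(old))
			return true;
		/* Old STACK_MISC only promised "some bytes were written";
		 * a cur spill of an unbound scalar satisfies that.
		 */
		if (is_stack_misc(old) && is_spilled_unbound_scalar(cur))
			return true;
		/* Zero is zero, whether tracked as a spilled register
		 * known to equal 0 or as STACK_ZERO byte markings.
		 */
		if (is_spilled_zero(old) && is_stack_zero(cur))
			return true;
		if (is_stack_zero(old) && is_spilled_zero(cur))
			return true;
		return false;	/* otherwise fall back to exact checks */
	}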

Comments

Andrii Nakryiko Jan. 10, 2024, 12:27 a.m. UTC | #1
On Mon, Jan 8, 2024 at 12:53 PM Maxim Mikityanskiy <maxtram95@gmail.com> wrote:
>
> From: Eduard Zingerman <eddyz87@gmail.com>
>
> Check that stacksafe() considers the following old vs cur stack spill
> state combinations equivalent:
> - spill of unbound scalar vs combination of STACK_{MISC,ZERO,INVALID}
> - STACK_MISC vs spill of unbound scalar
> - spill of scalar 0 vs STACK_ZERO
> - STACK_ZERO vs spill of scalar 0
>
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> ---
>  .../selftests/bpf/progs/verifier_spill_fill.c | 192 ++++++++++++++++++
>  1 file changed, 192 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> index 3764111d190d..3cd3fe30357f 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> @@ -1044,4 +1044,196 @@ l0_%=:  r1 >>= 32;                                      \
>         : __clobber_all);
>  }
>
> +/* stacksafe(): check if spill of unbound scalar in old state is
> + * considered equivalent to any state of the spill in the current state.
> + *
> + * On the first verification path an unbound scalar is written for
> + * fp-8 and later marked precise.
> + * On the second verification path a mix of STACK_MISC/ZERO/INVALID is
> + * written to fp-8. These should be considered equivalent.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("10: (79) r0 = *(u64 *)(r10 -8)")
> +__msg("10: safe")
> +__msg("processed 16 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_unbound_scalar_vs_cur_anything(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "r7 = r0;"
> +       /* get a random value for storing at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r7 == 0 goto 1f;"
> +       /* unbound scalar written to fp-8 */
> +       "*(u64*)(r10 - 8) = r0;"
> +       "goto 2f;"
> +"1:"
> +       /* mark fp-8 as mix of STACK_MISC/ZERO/INVALID */
> +       "r1 = 0;"
> +       "*(u8*)(r10 - 8) = r0;"

this is actually a spilled register, not STACK_ZERO. Is it important?

> +       "*(u8*)(r10 - 7) = r1;"
> +       /* fp-2..fp-6 remain STACK_INVALID */
> +       "*(u8*)(r10 - 1) = r0;"
> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r0 &= 0xff;"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if STACK_MISC in old state is considered
> + * equivalent to stack spill of unbound scalar in cur state.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar(id=1) R10=fp0 fp-8=scalar(id=1)")
> +__msg("8: safe")
> +__msg("processed 11 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_unbound_scalar_vs_cur_stack_misc(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure unbound scalar at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure STACK_MISC at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "*(u32*)(r10 - 4) = r0;"
> +"2:"
> +       /* read fp-8, should be considered safe on second visit */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if stack spill of unbound scalar in old state is
> + * considered equivalent to STACK_MISC in cur state.
> + */
> +SEC("socket")
> +__success  __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar() R10=fp0 fp-8=mmmmmmmm")
> +__msg("8: safe")
> +__msg("processed 11 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_stack_misc_vs_cur_unbound_scalar(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure STACK_MISC at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "*(u32*)(r10 - 4) = r0;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure unbound scalar at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +"2:"
> +       /* read fp-8, should be considered safe on second visit */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if spill of register with value 0 in old state
> + * is considered equivalent to STACK_ZERO.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("9: (79) r0 = *(u64 *)(r10 -8)")
> +__msg("9: safe")
> +__msg("processed 15 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_spill_zero_vs_stack_zero(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "r7 = r0;"
> +       /* get a random value for storing at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r7 == 0 goto 1f;"
> +       /* conjure spilled register with value 0 at fp-8 */
> +       "*(u64*)(r10 - 8) = r0;"
> +       "if r0 != 0 goto 3f;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure STACK_ZERO at fp-8 */
> +       "r1 = 0;"
> +       "*(u64*)(r10 - 8) = r1;"

this is not STACK_ZERO, it's a full register spill

> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +"3:"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): similar to old_spill_zero_vs_stack_zero() but the
> + * other way around: check if STACK_ZERO is considered equivalent to
> + * spill of register with value 0.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)")
> +__msg("8: safe")
> +__msg("processed 14 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_stack_zero_vs_spill_zero(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure STACK_ZERO at fp-8 */
> +       "r1 = 0;"
> +       "*(u64*)(r10 - 8) = r1;"

same, please double-check these STACK_xxx assumptions, as we now spill
registers instead of STACK_ZERO in a lot of cases

> +       "goto 2f;"
> +"1:"
> +       /* conjure spilled register with value 0 at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "if r0 != 0 goto 3f;"
> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +"3:"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
>  char _license[] SEC("license") = "GPL";
> --
> 2.43.0
>
Eduard Zingerman Jan. 10, 2024, 8:27 p.m. UTC | #2
On Tue, 2024-01-09 at 16:27 -0800, Andrii Nakryiko wrote:
[...]
> same, please double-check these STACK_xxx assumptions, as we now spill
> registers instead of STACK_ZERO in a lot of cases

Right, the test is outdated after your recent fixes for STACK_ZERO.
Thank you for catching this.
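
As a note on that fix-up: after those changes, a sequence like
"r1 = 0; *(u64*)(r10 - 8) = r1;" leaves fp-8 tracked as a spill of a
scalar known to equal 0, not as STACK_ZERO byte markings. One way to
still conjure genuine STACK_ZERO is to store an immediate 0 directly,
bypassing any register spill, via a raw BPF_ST_MEM instruction emitted
with the __imm_insn() helper from bpf_misc.h. A sketch, assuming a
zero-immediate BPF_ST store is still recorded as STACK_ZERO:

	/* Hypothetical replacement for the "conjure STACK_ZERO" steps
	 * above; assumes *(u64 *)(r10 - 8) = 0 done via BPF_ST_MEM
	 * marks fp-8 as STACK_ZERO rather than a spilled register.
	 */
	__naked void conjure_stack_zero(void)
	{
		asm volatile(
		".8byte %[fp8_st_zero];" /* *(u64 *)(r10 - 8) = 0 */
		"r0 = 0;"
		"exit;"
		:
		: __imm_insn(fp8_st_zero, BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0))
		: __clobber_all);
	}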