
[bpf-next,v2,15/15] selftests/bpf: states pruning checks for scalar vs STACK_{MISC,ZERO}

Message ID: 20240108205209.838365-16-maxtram95@gmail.com
State: Changes Requested
Delegated to: BPF
Series: Improvements for tracking scalars in the BPF verifier


Commit Message

Maxim Mikityanskiy Jan. 8, 2024, 8:52 p.m. UTC
From: Eduard Zingerman <eddyz87@gmail.com>

Check that stacksafe() considers the following old vs cur stack spill
state combinations equivalent:
- spill of unbound scalar vs combination of STACK_{MISC,ZERO,INVALID}
- STACK_MISC vs spill of unbound scalar
- spill of scalar 0 vs STACK_ZERO
- STACK_ZERO vs spill of scalar 0

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 .../selftests/bpf/progs/verifier_spill_fill.c | 192 ++++++++++++++++++
 1 file changed, 192 insertions(+)
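
For orientation, the equivalence rules these tests exercise can be sketched
as follows. This is an illustration only: the helper names below are
hypothetical, and the real checks live in stacksafe()/regsafe() in
kernel/bpf/verifier.c, where additional precision and liveness conditions
apply.

	/* Illustrative sketch only: helper names are hypothetical and
	 * precision/liveness handling is omitted.
	 */
	static bool old_slot_subsumes_cur_slot(struct bpf_stack_state *old,
					       struct bpf_stack_state *cur)
	{
		/* A spill of an unbound scalar in old reads back the same
		 * as any mix of STACK_MISC/STACK_ZERO/STACK_INVALID in cur:
		 * the fill yields an unbound scalar either way.
		 */
		if (is_spilled_unbound_scalar(old))
			return true;
		/* STACK_MISC in old promises no more than a spill of an
		 * unbound scalar in cur delivers.
		 */
		if (is_all_stack_misc(old) && is_spilled_unbound_scalar(cur))
			return true;
		/* A spilled scalar known to be zero and STACK_ZERO marks
		 * read back as the same value, in either direction.
		 */
		if (is_spilled_zero(old) && is_all_stack_zero(cur))
			return true;
		if (is_all_stack_zero(old) && is_spilled_zero(cur))
			return true;
		return false;
	}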

Comments

Andrii Nakryiko Jan. 10, 2024, 12:27 a.m. UTC | #1
On Mon, Jan 8, 2024 at 12:53 PM Maxim Mikityanskiy <maxtram95@gmail.com> wrote:
>
> From: Eduard Zingerman <eddyz87@gmail.com>
>
[...]
> +"1:"
> +       /* mark fp-8 as mix of STACK_MISC/ZERO/INVALID */
> +       "r1 = 0;"
> +       "*(u8*)(r10 - 8) = r0;"

this is actually a spilled register, not STACK_ZERO. Is it important?
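
The distinction at play: since the verifier's recent STACK_ZERO rework, a
64-bit store of a register whose value is known to be zero is tracked as a
spill of a zero scalar, while narrower stores of a known zero set
STACK_ZERO marks. A minimal sketch, assuming that behavior (see
check_stack_write_fixed_off() in kernel/bpf/verifier.c):

	/* assuming post-rework verifier behavior; illustration only */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"   /* fp-8: spilled scalar 0, not STACK_ZERO */
	"*(u8*)(r10 - 16) = r1;"   /* fp-16: one byte of STACK_ZERO marks */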

> +       "*(u8*)(r10 - 7) = r1;"
> +       /* fp-2..fp-6 remain STACK_INVALID */
> +       "*(u8*)(r10 - 1) = r0;"
> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r0 &= 0xff;"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if STACK_MISC in old state is considered
> + * equivalent to stack spill of unbound scalar in cur state.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar(id=1) R10=fp0 fp-8=scalar(id=1)")
> +__msg("8: safe")
> +__msg("processed 11 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_unbound_scalar_vs_cur_stack_misc(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure unbound scalar at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure STACK_MISC at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "*(u32*)(r10 - 4) = r0;"
> +"2:"
> +       /* read fp-8, should be considered safe on second visit */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if stack spill of unbound scalar in old state is
> + * considered equivalent to STACK_MISC in cur state.
> + */
> +SEC("socket")
> +__success  __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar() R10=fp0 fp-8=mmmmmmmm")
> +__msg("8: safe")
> +__msg("processed 11 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_stack_misc_vs_cur_unbound_scalar(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure STACK_MISC at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "*(u32*)(r10 - 4) = r0;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure unbound scalar at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +"2:"
> +       /* read fp-8, should be considered safe on second visit */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): check if spill of register with value 0 in old state
> + * is considered equivalent to STACK_ZERO.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("9: (79) r0 = *(u64 *)(r10 -8)")
> +__msg("9: safe")
> +__msg("processed 15 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_spill_zero_vs_stack_zero(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "r7 = r0;"
> +       /* get a random value for storing at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r7 == 0 goto 1f;"
> +       /* conjure spilled register with value 0 at fp-8 */
> +       "*(u64*)(r10 - 8) = r0;"
> +       "if r0 != 0 goto 3f;"
> +       "goto 2f;"
> +"1:"
> +       /* conjure STACK_ZERO at fp-8 */
> +       "r1 = 0;"
> +       "*(u64*)(r10 - 8) = r1;"

this is not STACK_ZERO, it's a full register spill
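
One way for the test to actually conjure STACK_ZERO, assuming
sub-8-byte stores of a known zero still produce STACK_ZERO marks, would
be to split the store (a sketch, not the committed fix):

	"1:"
	/* conjure STACK_ZERO at fp-8 via two 32-bit zero stores */
	"r1 = 0;"
	"*(u32*)(r10 - 8) = r1;"
	"*(u32*)(r10 - 4) = r1;"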

> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +"3:"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
> +/* stacksafe(): similar to old_spill_zero_vs_stack_zero() but the
> + * other way around: check if STACK_ZERO is considered equivalent to
> + * spill of register with value 0.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("8: (79) r0 = *(u64 *)(r10 -8)")
> +__msg("8: safe")
> +__msg("processed 14 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void old_stack_zero_vs_spill_zero(void)
> +{
> +       asm volatile(
> +       /* get a random value for branching */
> +       "call %[bpf_ktime_get_ns];"
> +       "if r0 == 0 goto 1f;"
> +       /* conjure STACK_ZERO at fp-8 */
> +       "r1 = 0;"
> +       "*(u64*)(r10 - 8) = r1;"

same, please double check these STACK_xxx assumptions, as we now spill
registers instead of marking STACK_ZERO in a lot of cases

> +       "goto 2f;"
> +"1:"
> +       /* conjure spilled register with value 0 at fp-8 */
> +       "call %[bpf_ktime_get_ns];"
> +       "*(u64*)(r10 - 8) = r0;"
> +       "if r0 != 0 goto 3f;"
> +"2:"
> +       /* read fp-8 and force it precise, should be considered safe
> +        * on second visit
> +        */
> +       "r0 = *(u64*)(r10 - 8);"
> +       "r1 = r10;"
> +       "r1 += r0;"
> +"3:"
> +       "exit;"
> +       :
> +       : __imm(bpf_ktime_get_ns)
> +       : __clobber_all);
> +}
> +
>  char _license[] SEC("license") = "GPL";
> --
> 2.43.0
>
Eduard Zingerman Jan. 10, 2024, 8:27 p.m. UTC | #2
On Tue, 2024-01-09 at 16:27 -0800, Andrii Nakryiko wrote:
[...]
> same, please double check this STACK_xxx assumptions, as now we spill
> registers instead of STACK_ZERO in a lot of cases

Right, the test is outdated after your recent fixes for STACK_ZERO.
Thank you for catching this.

Patch

diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 3764111d190d..3cd3fe30357f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -1044,4 +1044,196 @@  l0_%=:	r1 >>= 32;					\
 	: __clobber_all);
 }
 
+/* stacksafe(): check if spill of unbound scalar in old state is
+ * considered equivalent to any state of the spill in the current state.
+ *
+ * On the first verification path an unbound scalar is written for
+ * fp-8 and later marked precise.
+ * On the second verification path a mix of STACK_MISC/ZERO/INVALID is
+ * written to fp-8. These should be considered equivalent.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("10: (79) r0 = *(u64 *)(r10 -8)")
+__msg("10: safe")
+__msg("processed 16 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_unbound_scalar_vs_cur_anything(void)
+{
+	asm volatile(
+	/* get a random value for branching */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* get a random value for storing at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"if r7 == 0 goto 1f;"
+	/* unbound scalar written to fp-8 */
+	"*(u64*)(r10 - 8) = r0;"
+	"goto 2f;"
+"1:"
+	/* mark fp-8 as mix of STACK_MISC/ZERO/INVALID */
+	"r1 = 0;"
+	"*(u8*)(r10 - 8) = r0;"
+	"*(u8*)(r10 - 7) = r1;"
+	/* fp-2..fp-6 remain STACK_INVALID */
+	"*(u8*)(r10 - 1) = r0;"
+"2:"
+	/* read fp-8 and force it precise, should be considered safe
+	 * on second visit
+	 */
+	"r0 = *(u64*)(r10 - 8);"
+	"r0 &= 0xff;"
+	"r1 = r10;"
+	"r1 += r0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* stacksafe(): check if STACK_MISC in old state is considered
+ * equivalent to stack spill of unbound scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar(id=1) R10=fp0 fp-8=scalar(id=1)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_unbound_scalar_vs_cur_stack_misc(void)
+{
+	asm volatile(
+	/* get a random value for branching */
+	"call %[bpf_ktime_get_ns];"
+	"if r0 == 0 goto 1f;"
+	/* conjure unbound scalar at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"*(u64*)(r10 - 8) = r0;"
+	"goto 2f;"
+"1:"
+	/* conjure STACK_MISC at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"*(u64*)(r10 - 8) = r0;"
+	"*(u32*)(r10 - 4) = r0;"
+"2:"
+	/* read fp-8, should be considered safe on second visit */
+	"r0 = *(u64*)(r10 - 8);"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* stacksafe(): check if stack spill of unbound scalar in old state is
+ * considered equivalent to STACK_MISC in cur state.
+ */
+SEC("socket")
+__success  __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)         ; R0_w=scalar() R10=fp0 fp-8=mmmmmmmm")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_unbound_scalar(void)
+{
+	asm volatile(
+	/* get a random value for branching */
+	"call %[bpf_ktime_get_ns];"
+	"if r0 == 0 goto 1f;"
+	/* conjure STACK_MISC at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"*(u64*)(r10 - 8) = r0;"
+	"*(u32*)(r10 - 4) = r0;"
+	"goto 2f;"
+"1:"
+	/* conjure unbound scalar at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"*(u64*)(r10 - 8) = r0;"
+"2:"
+	/* read fp-8, should be considered safe on second visit */
+	"r0 = *(u64*)(r10 - 8);"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* stacksafe(): check if spill of register with value 0 in old state
+ * is considered equivalent to STACK_ZERO.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("9: (79) r0 = *(u64 *)(r10 -8)")
+__msg("9: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_spill_zero_vs_stack_zero(void)
+{
+	asm volatile(
+	/* get a random value for branching */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* get a random value for storing at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"if r7 == 0 goto 1f;"
+	/* conjure spilled register with value 0 at fp-8 */
+	"*(u64*)(r10 - 8) = r0;"
+	"if r0 != 0 goto 3f;"
+	"goto 2f;"
+"1:"
+	/* conjure STACK_ZERO at fp-8 */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+"2:"
+	/* read fp-8 and force it precise, should be considered safe
+	 * on second visit
+	 */
+	"r0 = *(u64*)(r10 - 8);"
+	"r1 = r10;"
+	"r1 += r0;"
+"3:"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* stacksafe(): similar to old_spill_zero_vs_stack_zero() but the
+ * other way around: check if STACK_ZERO is considered equivalent to
+ * spill of register with value 0.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 14 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_zero_vs_spill_zero(void)
+{
+	asm volatile(
+	/* get a random value for branching */
+	"call %[bpf_ktime_get_ns];"
+	"if r0 == 0 goto 1f;"
+	/* conjure STACK_ZERO at fp-8 */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+	"goto 2f;"
+"1:"
+	/* conjure spilled register with value 0 at fp-8 */
+	"call %[bpf_ktime_get_ns];"
+	"*(u64*)(r10 - 8) = r0;"
+	"if r0 != 0 goto 3f;"
+"2:"
+	/* read fp-8 and force it precise, should be considered safe
+	 * on second visit
+	 */
+	"r0 = *(u64*)(r10 - 8);"
+	"r1 = r10;"
+	"r1 += r0;"
+"3:"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";