diff mbox series

[v3,bpf-next,09/10] selftests/bpf: validate precision logic in partial_stack_load_preserves_zeros

Message ID 20231204192601.2672497-10-andrii@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Series Complete BPF verifier precision tracking support for register spills

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success SINGLE THREAD; Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/cc_maintainers warning 11 maintainers not CCed: haoluo@google.com jolsa@kernel.org kpsingh@kernel.org martin.lau@linux.dev john.fastabend@gmail.com mykolal@fb.com yonghong.song@linux.dev song@kernel.org shuah@kernel.org sdf@google.com linux-kselftest@vger.kernel.org
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for s390x-gcc / test
bpf/vmtest-bpf-next-VM_Test-8 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-10 fail Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for x86_64-gcc / test
bpf/vmtest-bpf-next-VM_Test-12 success Logs for x86_64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for x86_64-llvm-16 / build / build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-llvm-16 / test
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-llvm-16 / veristat

Commit Message

Andrii Nakryiko Dec. 4, 2023, 7:26 p.m. UTC
Enhance the partial_stack_load_preserves_zeros subtest with detailed
precision propagation log checks. We now expect fp-16 to be a spilled,
initially imprecise, zero const register, which is later marked as
precise even when a partial stack slot load is performed, even if it's
not a register fill (!).
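
For context, here is a condensed sketch (not part of the patch, and the
function name precision_sketch is illustrative) of the tricky case the new
__msg() checks pin down, assuming the usual selftest scaffolding from
bpf_misc.h (__naked, __imm_ptr, __clobber_common); the full,
instruction-for-instruction test body is in the diff below:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

char single_byte_buf[1] SEC(".data.single_byte_buf");

SEC("raw_tp")
__log_level(2)
__success
__naked void precision_sketch(void)
{
	asm volatile (
		/* fp-8 becomes STACK_ZERO via a direct store of an immediate */
		"*(u64 *)(r10 -8) = 0;"
		/* fp-16 is a spill of a zero const register; the spill itself
		 * leaves the slot imprecise */
		"r0 = 0;"
		"*(u64 *)(r10 -16) = r0;"
		/* partial one-byte load from the middle of the fp-16 spill:
		 * not a register fill, yet still known to be zero */
		"r1 = %[single_byte_buf];"
		"r2 = *(u8 *)(r10 -9);"
		/* pointer arithmetic with r2 triggers backtracking, which must
		 * propagate precision through the fp-16 slot back to r0 = 0 */
		"r1 += r2;"
		"*(u8 *)(r1 + 0) = r2;"
		"r0 = 0;"
		"exit;"
		:
		: __imm_ptr(single_byte_buf)
		: __clobber_common);
}

The mark_precise: messages asserted below trace exactly this backtracking
walk: from the r1 += r2 use, back through the fp-16 stack slot, to the
original r0 = 0 instruction.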

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/progs/verifier_spill_fill.c | 40 +++++++++++++++----
 1 file changed, 32 insertions(+), 8 deletions(-)

Patch

diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 7c1f1927f01a..f7bebc79fec4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -492,6 +492,22 @@  char single_byte_buf[1] SEC(".data.single_byte_buf");
 SEC("raw_tp")
 __log_level(2)
 __success
+/* make sure fp-8 is all STACK_ZERO */
+__msg("2: (7a) *(u64 *)(r10 -8) = 0          ; R10=fp0 fp-8_w=00000000")
+/* but fp-16 is spilled IMPRECISE zero const reg */
+__msg("4: (7b) *(u64 *)(r10 -16) = r0        ; R0_w=0 R10=fp0 fp-16_w=0")
+/* and now check that precision propagation works even for such tricky case */
+__msg("10: (71) r2 = *(u8 *)(r10 -9)         ; R2_w=P0 R10=fp0 fp-16_w=0")
+__msg("11: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 11 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
+__msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2")
+__msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)")
+__msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
 __naked void partial_stack_load_preserves_zeros(void)
 {
 	asm volatile (
@@ -505,42 +521,50 @@  __naked void partial_stack_load_preserves_zeros(void)
 		/* load single U8 from non-aligned STACK_ZERO slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u8 *)(r10 -1);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* load single U8 from non-aligned ZERO REG slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u8 *)(r10 -9);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* load single U16 from non-aligned STACK_ZERO slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u16 *)(r10 -2);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* load single U16 from non-aligned ZERO REG slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u16 *)(r10 -10);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* load single U32 from non-aligned STACK_ZERO slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u32 *)(r10 -4);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* load single U32 from non-aligned ZERO REG slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u32 *)(r10 -12);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* for completeness, load U64 from STACK_ZERO slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u64 *)(r10 -8);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		/* for completeness, load U64 from ZERO REG slot */
 		"r1 = %[single_byte_buf];"
 		"r2 = *(u64 *)(r10 -16);"
-		"r1 += r2;" /* this should be fine */
+		"r1 += r2;"
+		"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
 
 		"r0 = 0;"
 		"exit;"