diff mbox series

[bpf-next,v5,4/4] selftests/bpf: verify that check_ids() is used for scalars in regsafe()

Message ID 20230612160801.2804666-5-eddyz87@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series verify scalar ids mapping in regsafe() | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-6 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 pending Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for veristat
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs on s390x with gcc
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/cc_maintainers warning 9 maintainers not CCed: kpsingh@kernel.org shuah@kernel.org sdf@google.com john.fastabend@gmail.com song@kernel.org mykolal@fb.com linux-kselftest@vger.kernel.org jolsa@kernel.org haoluo@google.com
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning CHECK: Lines should not end with a '(' WARNING: quoted string split across lines
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Eduard Zingerman June 12, 2023, 4:08 p.m. UTC
Verify that the following example is rejected by verifier:

  r9 = ... some pointer with range X ...
  r6 = ... unbound scalar ID=a ...
  r7 = ... unbound scalar ID=b ...
  if (r6 > r7) goto +1
  r7 = r6
  if (r7 > X) goto exit
  r9 += r6
  *(u64 *)r9 = Y

Also add test cases to:
- check that check_alu_op() for BPF_MOV instruction does not allocate
  scalar ID if source register is a constant;
- check that unique scalar IDs are ignored when new verifier state is
  compared to cached verifier state;
- check that two different scalar IDs in a verified state can't be
  mapped to the same scalar ID in current state.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 .../selftests/bpf/progs/verifier_scalar_ids.c | 313 ++++++++++++++++++
 1 file changed, 313 insertions(+)

Comments

Maxim Mikityanskiy June 12, 2023, 5:40 p.m. UTC | #1
On Mon, 12 Jun 2023 at 19:08:01 +0300, Eduard Zingerman wrote:
> Verify that the following example is rejected by verifier:
> 
>   r9 = ... some pointer with range X ...
>   r6 = ... unbound scalar ID=a ...
>   r7 = ... unbound scalar ID=b ...
>   if (r6 > r7) goto +1
>   r7 = r6
>   if (r7 > X) goto exit
>   r9 += r6
>   *(u64 *)r9 = Y
> 
> Also add test cases to:
> - check that check_alu_op() for BPF_MOV instruction does not allocate
>   scalar ID if source register is a constant;
> - check that unique scalar IDs are ignored when new verifier state is
>   compared to cached verifier state;
> - check that two different scalar IDs in a verified state can't be
>   mapped to the same scalar ID in current state.
> 
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> ---
>  .../selftests/bpf/progs/verifier_scalar_ids.c | 313 ++++++++++++++++++
>  1 file changed, 313 insertions(+)
> 
> diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> index 8a5203fb14ca..5d56e764fe43 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> @@ -341,4 +341,317 @@ __naked void precision_two_ids(void)
>  	: __clobber_all);
>  }
>  
> +/* Verify that check_ids() is used by regsafe() for scalars.
> + *
> + * r9 = ... some pointer with range X ...
> + * r6 = ... unbound scalar ID=a ...
> + * r7 = ... unbound scalar ID=b ...
> + * if (r6 > r7) goto +1
> + * r6 = r7
> + * if (r6 > X) goto exit
> + * r9 += r7
> + * *(u8 *)r9 = Y
> + *
> + * The memory access is safe only if r7 is bounded,
> + * which is true for one branch and not true for another.
> + */
> +SEC("socket")
> +__failure __msg("register with unbounded min value")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void check_ids_in_regsafe(void)
> +{
> +	asm volatile (
> +	/* Bump allocated stack */
> +	"r1 = 0;"
> +	"*(u64*)(r10 - 8) = r1;"
> +	/* r9 = pointer to stack */
> +	"r9 = r10;"
> +	"r9 += -8;"
> +	/* r7 = ktime_get_ns() */
> +	"call %[bpf_ktime_get_ns];"
> +	"r7 = r0;"
> +	/* r6 = ktime_get_ns() */
> +	"call %[bpf_ktime_get_ns];"
> +	"r6 = r0;"
> +	/* if r6 > r7 is an unpredictable jump */
> +	"if r6 > r7 goto l1_%=;"
> +	"r7 = r6;"
> +"l1_%=:"
> +	/* if r6 > 4 exit(0) */
> +	"if r7 > 4 goto l2_%=;"
> +	/* Access memory at r9[r7] */
> +	"r9 += r6;"

Sorry if I'm missing some context, but there seem to be discrepancies
between the code of this test, the comments right here, the comment
above the test and the commit message. r6 vs r7 don't match in a few
places.

The code matches the commit message and looks correct (unsafe). The code
sample in the comments, however, is different and looks safe to me
(r7 <= r6 <= X, accessing r9[r7]).

> +	"r0 = *(u8*)(r9 + 0);"
> +"l2_%=:"
> +	"r0 = 0;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Similar to check_ids_in_regsafe.
> + * The l0 could be reached in two states:
> + *
> + *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
> + *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
> + *
> + * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
> + * This example would be considered safe without changes to
> + * mark_chain_precision() to track scalar values with equal IDs.
> + */
> +SEC("socket")
> +__failure __msg("register with unbounded min value")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void check_ids_in_regsafe_2(void)
> +{
> +	asm volatile (
> +	/* Bump allocated stack */
> +	"r1 = 0;"
> +	"*(u64*)(r10 - 8) = r1;"
> +	/* r9 = pointer to stack */
> +	"r9 = r10;"
> +	"r9 += -8;"
> +	/* r8 = ktime_get_ns() */
> +	"call %[bpf_ktime_get_ns];"
> +	"r8 = r0;"
> +	/* r7 = ktime_get_ns() */
> +	"call %[bpf_ktime_get_ns];"
> +	"r7 = r0;"
> +	/* r6 = ktime_get_ns() */
> +	"call %[bpf_ktime_get_ns];"
> +	"r6 = r0;"
> +	/* scratch .id from r0 */
> +	"r0 = 0;"
> +	/* if r6 > r7 is an unpredictable jump */
> +	"if r6 > r7 goto l1_%=;"
> +	/* tie r6 and r7 .id */
> +	"r6 = r7;"
> +"l0_%=:"
> +	/* if r7 > 4 exit(0) */
> +	"if r7 > 4 goto l2_%=;"
> +	/* Access memory at r9[r7] */
> +	"r9 += r6;"
> +	"r0 = *(u8*)(r9 + 0);"
> +"l2_%=:"
> +	"r0 = 0;"
> +	"exit;"
> +"l1_%=:"
> +	/* tie r6 and r8 .id */
> +	"r6 = r8;"
> +	"goto l0_%=;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Check that scalar IDs *are not* generated on register to register
> + * assignments if source register is a constant.
> + *
> + * If such IDs *are* generated the 'l1' below would be reached in
> + * two states:
> + *
> + *   (1) r1{.id=A}, r2{.id=A}
> + *   (2) r1{.id=C}, r2{.id=C}
> + *
> + * Thus forcing 'if r1 == r2' verification twice.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("11: (1d) if r3 == r4 goto pc+0")
> +__msg("frame 0: propagating r3,r4")
> +__msg("11: safe")
> +__msg("processed 15 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void no_scalar_id_for_const(void)
> +{
> +	asm volatile (
> +	"call %[bpf_ktime_get_ns];"
> +	/* unpredictable jump */
> +	"if r0 > 7 goto l0_%=;"
> +	/* possibly generate same scalar ids for r3 and r4 */
> +	"r1 = 0;"
> +	"r1 = r1;"
> +	"r3 = r1;"
> +	"r4 = r1;"
> +	"goto l1_%=;"
> +"l0_%=:"
> +	/* possibly generate different scalar ids for r3 and r4 */
> +	"r1 = 0;"
> +	"r2 = 0;"
> +	"r3 = r1;"
> +	"r4 = r2;"
> +"l1_%=:"
> +	/* predictable jump, marks r3 and r4 precise */
> +	"if r3 == r4 goto +0;"
> +	"r0 = 0;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Same as no_scalar_id_for_const() but for 32-bit values */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("11: (1e) if w3 == w4 goto pc+0")
> +__msg("frame 0: propagating r3,r4")
> +__msg("11: safe")
> +__msg("processed 15 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void no_scalar_id_for_const32(void)
> +{
> +	asm volatile (
> +	"call %[bpf_ktime_get_ns];"
> +	/* unpredictable jump */
> +	"if r0 > 7 goto l0_%=;"
> +	/* possibly generate same scalar ids for r3 and r4 */
> +	"w1 = 0;"
> +	"w1 = w1;"
> +	"w3 = w1;"
> +	"w4 = w1;"
> +	"goto l1_%=;"
> +"l0_%=:"
> +	/* possibly generate different scalar ids for r3 and r4 */
> +	"w1 = 0;"
> +	"w2 = 0;"
> +	"w3 = w1;"
> +	"w4 = w2;"
> +"l1_%=:"
> +	/* predictable jump, marks r1 and r2 precise */
> +	"if w3 == w4 goto +0;"
> +	"r0 = 0;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Check that unique scalar IDs are ignored when new verifier state is
> + * compared to cached verifier state. For this test:
> + * - cached state has no id on r1
> + * - new state has a unique id on r1
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("6: (25) if r6 > 0x7 goto pc+1")
> +__msg("7: (57) r1 &= 255")
> +__msg("8: (bf) r2 = r10")
> +__msg("from 6 to 8: safe")
> +__msg("processed 12 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void ignore_unique_scalar_ids_cur(void)
> +{
> +	asm volatile (
> +	"call %[bpf_ktime_get_ns];"
> +	"r6 = r0;"
> +	"call %[bpf_ktime_get_ns];"
> +	"r0 &= 0xff;"
> +	/* r1.id == r0.id */
> +	"r1 = r0;"
> +	/* make r1.id unique */
> +	"r0 = 0;"
> +	"if r6 > 7 goto l0_%=;"
> +	/* clear r1 id, but keep the range compatible */
> +	"r1 &= 0xff;"
> +"l0_%=:"
> +	/* get here in two states:
> +	 * - first: r1 has no id (cached state)
> +	 * - second: r1 has a unique id (should be considered equivalent)
> +	 */
> +	"r2 = r10;"
> +	"r2 += r1;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Check that unique scalar IDs are ignored when new verifier state is
> + * compared to cached verifier state. For this test:
> + * - cached state has a unique id on r1
> + * - new state has no id on r1
> + */
> +SEC("socket")
> +__success __log_level(2)
> +__msg("6: (25) if r6 > 0x7 goto pc+1")
> +__msg("7: (05) goto pc+1")
> +__msg("9: (bf) r2 = r10")
> +__msg("9: safe")
> +__msg("processed 13 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void ignore_unique_scalar_ids_old(void)
> +{
> +	asm volatile (
> +	"call %[bpf_ktime_get_ns];"
> +	"r6 = r0;"
> +	"call %[bpf_ktime_get_ns];"
> +	"r0 &= 0xff;"
> +	/* r1.id == r0.id */
> +	"r1 = r0;"
> +	/* make r1.id unique */
> +	"r0 = 0;"
> +	"if r6 > 7 goto l1_%=;"
> +	"goto l0_%=;"
> +"l1_%=:"
> +	/* clear r1 id, but keep the range compatible */
> +	"r1 &= 0xff;"
> +"l0_%=:"
> +	/* get here in two states:
> +	 * - first: r1 has a unique id (cached state)
> +	 * - second: r1 has no id (should be considered equivalent)
> +	 */
> +	"r2 = r10;"
> +	"r2 += r1;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
> +/* Check that two different scalar IDs in a verified state can't be
> + * mapped to the same scalar ID in current state.
> + */
> +SEC("socket")
> +__success __log_level(2)
> +/* The exit instruction should be reachable from two states,
> + * use two matches and "processed .. insns" to ensure this.
> + */
> +__msg("13: (95) exit")
> +__msg("13: (95) exit")
> +__msg("processed 18 insns")
> +__flag(BPF_F_TEST_STATE_FREQ)
> +__naked void two_old_ids_one_cur_id(void)
> +{
> +	asm volatile (
> +	/* Give unique scalar IDs to r{6,7} */
> +	"call %[bpf_ktime_get_ns];"
> +	"r0 &= 0xff;"
> +	"r6 = r0;"
> +	"call %[bpf_ktime_get_ns];"
> +	"r0 &= 0xff;"
> +	"r7 = r0;"
> +	"r0 = 0;"
> +	/* Maybe make r{6,7} IDs identical */
> +	"if r6 > r7 goto l0_%=;"
> +	"goto l1_%=;"
> +"l0_%=:"
> +	"r6 = r7;"
> +"l1_%=:"
> +	/* Mark r{6,7} precise.
> +	 * Get here in two states:
> +	 * - first:  r6{.id=A}, r7{.id=B} (cached state)
> +	 * - second: r6{.id=A}, r7{.id=A}
> +	 * Currently we don't want to consider such states equivalent.
> +	 * Thus, marker instruction "r0 = r0;" would be verified twice.
> +	 */
> +	"r2 = r10;"
> +	"r2 += r6;"
> +	"r2 += r7;"
> +	"exit;"
> +	:
> +	: __imm(bpf_ktime_get_ns)
> +	: __clobber_all);
> +}
> +
>  char _license[] SEC("license") = "GPL";
> -- 
> 2.40.1
> 
>
Eduard Zingerman June 12, 2023, 7:42 p.m. UTC | #2
On Mon, 2023-06-12 at 20:40 +0300, Maxim Mikityanskiy wrote:
> On Mon, 12 Jun 2023 at 19:08:01 +0300, Eduard Zingerman wrote:
> > Verify that the following example is rejected by verifier:
> > 
> >   r9 = ... some pointer with range X ...
> >   r6 = ... unbound scalar ID=a ...
> >   r7 = ... unbound scalar ID=b ...
> >   if (r6 > r7) goto +1
> >   r7 = r6
> >   if (r7 > X) goto exit
> >   r9 += r6
> >   *(u64 *)r9 = Y
> > 
> > Also add test cases to:
> > - check that check_alu_op() for BPF_MOV instruction does not allocate
> >   scalar ID if source register is a constant;
> > - check that unique scalar IDs are ignored when new verifier state is
> >   compared to cached verifier state;
> > - check that two different scalar IDs in a verified state can't be
> >   mapped to the same scalar ID in current state.
> > 
> > Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> > ---
> >  .../selftests/bpf/progs/verifier_scalar_ids.c | 313 ++++++++++++++++++
> >  1 file changed, 313 insertions(+)
> > 
> > diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> > index 8a5203fb14ca..5d56e764fe43 100644
> > --- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> > +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
> > @@ -341,4 +341,317 @@ __naked void precision_two_ids(void)
> >  	: __clobber_all);
> >  }
> >  
> > +/* Verify that check_ids() is used by regsafe() for scalars.
> > + *
> > + * r9 = ... some pointer with range X ...
> > + * r6 = ... unbound scalar ID=a ...
> > + * r7 = ... unbound scalar ID=b ...
> > + * if (r6 > r7) goto +1
> > + * r6 = r7
> > + * if (r6 > X) goto exit
> > + * r9 += r7
> > + * *(u8 *)r9 = Y
> > + *
> > + * The memory access is safe only if r7 is bounded,
> > + * which is true for one branch and not true for another.
> > + */
> > +SEC("socket")
> > +__failure __msg("register with unbounded min value")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void check_ids_in_regsafe(void)
> > +{
> > +	asm volatile (
> > +	/* Bump allocated stack */
> > +	"r1 = 0;"
> > +	"*(u64*)(r10 - 8) = r1;"
> > +	/* r9 = pointer to stack */
> > +	"r9 = r10;"
> > +	"r9 += -8;"
> > +	/* r7 = ktime_get_ns() */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r7 = r0;"
> > +	/* r6 = ktime_get_ns() */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r6 = r0;"
> > +	/* if r6 > r7 is an unpredictable jump */
> > +	"if r6 > r7 goto l1_%=;"
> > +	"r7 = r6;"
> > +"l1_%=:"
> > +	/* if r6 > 4 exit(0) */
> > +	"if r7 > 4 goto l2_%=;"
> > +	/* Access memory at r9[r7] */
> > +	"r9 += r6;"
> 
> Sorry if I'm missing some context, but there seem to be discrepancies
> between the code of this test, the comments right here, the comment
> above the test and the commit message. r6 vs r7 don't match in a few
> places.
> 
> The code matches the commit message and looks correct (unsafe). The code
> sample in the comments, however, is different and looks safe to me
> (r7 <= r6 <= X, accessing r9[r7]).

Yep, thank you for catching this. I need to update the comments.
Will wait a couple of hours for other comments and re-send v6 with a fix.

> 
> > +	"r0 = *(u8*)(r9 + 0);"
> > +"l2_%=:"
> > +	"r0 = 0;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Similar to check_ids_in_regsafe.
> > + * The l0 could be reached in two states:
> > + *
> > + *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
> > + *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
> > + *
> > + * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
> > + * This example would be considered safe without changes to
> > + * mark_chain_precision() to track scalar values with equal IDs.
> > + */
> > +SEC("socket")
> > +__failure __msg("register with unbounded min value")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void check_ids_in_regsafe_2(void)
> > +{
> > +	asm volatile (
> > +	/* Bump allocated stack */
> > +	"r1 = 0;"
> > +	"*(u64*)(r10 - 8) = r1;"
> > +	/* r9 = pointer to stack */
> > +	"r9 = r10;"
> > +	"r9 += -8;"
> > +	/* r8 = ktime_get_ns() */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r8 = r0;"
> > +	/* r7 = ktime_get_ns() */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r7 = r0;"
> > +	/* r6 = ktime_get_ns() */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r6 = r0;"
> > +	/* scratch .id from r0 */
> > +	"r0 = 0;"
> > +	/* if r6 > r7 is an unpredictable jump */
> > +	"if r6 > r7 goto l1_%=;"
> > +	/* tie r6 and r7 .id */
> > +	"r6 = r7;"
> > +"l0_%=:"
> > +	/* if r7 > 4 exit(0) */
> > +	"if r7 > 4 goto l2_%=;"
> > +	/* Access memory at r9[r7] */
> > +	"r9 += r6;"
> > +	"r0 = *(u8*)(r9 + 0);"
> > +"l2_%=:"
> > +	"r0 = 0;"
> > +	"exit;"
> > +"l1_%=:"
> > +	/* tie r6 and r8 .id */
> > +	"r6 = r8;"
> > +	"goto l0_%=;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Check that scalar IDs *are not* generated on register to register
> > + * assignments if source register is a constant.
> > + *
> > + * If such IDs *are* generated the 'l1' below would be reached in
> > + * two states:
> > + *
> > + *   (1) r1{.id=A}, r2{.id=A}
> > + *   (2) r1{.id=C}, r2{.id=C}
> > + *
> > + * Thus forcing 'if r1 == r2' verification twice.
> > + */
> > +SEC("socket")
> > +__success __log_level(2)
> > +__msg("11: (1d) if r3 == r4 goto pc+0")
> > +__msg("frame 0: propagating r3,r4")
> > +__msg("11: safe")
> > +__msg("processed 15 insns")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void no_scalar_id_for_const(void)
> > +{
> > +	asm volatile (
> > +	"call %[bpf_ktime_get_ns];"
> > +	/* unpredictable jump */
> > +	"if r0 > 7 goto l0_%=;"
> > +	/* possibly generate same scalar ids for r3 and r4 */
> > +	"r1 = 0;"
> > +	"r1 = r1;"
> > +	"r3 = r1;"
> > +	"r4 = r1;"
> > +	"goto l1_%=;"
> > +"l0_%=:"
> > +	/* possibly generate different scalar ids for r3 and r4 */
> > +	"r1 = 0;"
> > +	"r2 = 0;"
> > +	"r3 = r1;"
> > +	"r4 = r2;"
> > +"l1_%=:"
> > +	/* predictable jump, marks r3 and r4 precise */
> > +	"if r3 == r4 goto +0;"
> > +	"r0 = 0;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Same as no_scalar_id_for_const() but for 32-bit values */
> > +SEC("socket")
> > +__success __log_level(2)
> > +__msg("11: (1e) if w3 == w4 goto pc+0")
> > +__msg("frame 0: propagating r3,r4")
> > +__msg("11: safe")
> > +__msg("processed 15 insns")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void no_scalar_id_for_const32(void)
> > +{
> > +	asm volatile (
> > +	"call %[bpf_ktime_get_ns];"
> > +	/* unpredictable jump */
> > +	"if r0 > 7 goto l0_%=;"
> > +	/* possibly generate same scalar ids for r3 and r4 */
> > +	"w1 = 0;"
> > +	"w1 = w1;"
> > +	"w3 = w1;"
> > +	"w4 = w1;"
> > +	"goto l1_%=;"
> > +"l0_%=:"
> > +	/* possibly generate different scalar ids for r3 and r4 */
> > +	"w1 = 0;"
> > +	"w2 = 0;"
> > +	"w3 = w1;"
> > +	"w4 = w2;"
> > +"l1_%=:"
> > +	/* predictable jump, marks r1 and r2 precise */
> > +	"if w3 == w4 goto +0;"
> > +	"r0 = 0;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Check that unique scalar IDs are ignored when new verifier state is
> > + * compared to cached verifier state. For this test:
> > + * - cached state has no id on r1
> > + * - new state has a unique id on r1
> > + */
> > +SEC("socket")
> > +__success __log_level(2)
> > +__msg("6: (25) if r6 > 0x7 goto pc+1")
> > +__msg("7: (57) r1 &= 255")
> > +__msg("8: (bf) r2 = r10")
> > +__msg("from 6 to 8: safe")
> > +__msg("processed 12 insns")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void ignore_unique_scalar_ids_cur(void)
> > +{
> > +	asm volatile (
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r6 = r0;"
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r0 &= 0xff;"
> > +	/* r1.id == r0.id */
> > +	"r1 = r0;"
> > +	/* make r1.id unique */
> > +	"r0 = 0;"
> > +	"if r6 > 7 goto l0_%=;"
> > +	/* clear r1 id, but keep the range compatible */
> > +	"r1 &= 0xff;"
> > +"l0_%=:"
> > +	/* get here in two states:
> > +	 * - first: r1 has no id (cached state)
> > +	 * - second: r1 has a unique id (should be considered equivalent)
> > +	 */
> > +	"r2 = r10;"
> > +	"r2 += r1;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Check that unique scalar IDs are ignored when new verifier state is
> > + * compared to cached verifier state. For this test:
> > + * - cached state has a unique id on r1
> > + * - new state has no id on r1
> > + */
> > +SEC("socket")
> > +__success __log_level(2)
> > +__msg("6: (25) if r6 > 0x7 goto pc+1")
> > +__msg("7: (05) goto pc+1")
> > +__msg("9: (bf) r2 = r10")
> > +__msg("9: safe")
> > +__msg("processed 13 insns")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void ignore_unique_scalar_ids_old(void)
> > +{
> > +	asm volatile (
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r6 = r0;"
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r0 &= 0xff;"
> > +	/* r1.id == r0.id */
> > +	"r1 = r0;"
> > +	/* make r1.id unique */
> > +	"r0 = 0;"
> > +	"if r6 > 7 goto l1_%=;"
> > +	"goto l0_%=;"
> > +"l1_%=:"
> > +	/* clear r1 id, but keep the range compatible */
> > +	"r1 &= 0xff;"
> > +"l0_%=:"
> > +	/* get here in two states:
> > +	 * - first: r1 has a unique id (cached state)
> > +	 * - second: r1 has no id (should be considered equivalent)
> > +	 */
> > +	"r2 = r10;"
> > +	"r2 += r1;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> > +/* Check that two different scalar IDs in a verified state can't be
> > + * mapped to the same scalar ID in current state.
> > + */
> > +SEC("socket")
> > +__success __log_level(2)
> > +/* The exit instruction should be reachable from two states,
> > + * use two matches and "processed .. insns" to ensure this.
> > + */
> > +__msg("13: (95) exit")
> > +__msg("13: (95) exit")
> > +__msg("processed 18 insns")
> > +__flag(BPF_F_TEST_STATE_FREQ)
> > +__naked void two_old_ids_one_cur_id(void)
> > +{
> > +	asm volatile (
> > +	/* Give unique scalar IDs to r{6,7} */
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r0 &= 0xff;"
> > +	"r6 = r0;"
> > +	"call %[bpf_ktime_get_ns];"
> > +	"r0 &= 0xff;"
> > +	"r7 = r0;"
> > +	"r0 = 0;"
> > +	/* Maybe make r{6,7} IDs identical */
> > +	"if r6 > r7 goto l0_%=;"
> > +	"goto l1_%=;"
> > +"l0_%=:"
> > +	"r6 = r7;"
> > +"l1_%=:"
> > +	/* Mark r{6,7} precise.
> > +	 * Get here in two states:
> > +	 * - first:  r6{.id=A}, r7{.id=B} (cached state)
> > +	 * - second: r6{.id=A}, r7{.id=A}
> > +	 * Currently we don't want to consider such states equivalent.
> > +	 * Thus, marker instruction "r0 = r0;" would be verified twice.
> > +	 */
> > +	"r2 = r10;"
> > +	"r2 += r6;"
> > +	"r2 += r7;"
> > +	"exit;"
> > +	:
> > +	: __imm(bpf_ktime_get_ns)
> > +	: __clobber_all);
> > +}
> > +
> >  char _license[] SEC("license") = "GPL";
> > -- 
> > 2.40.1
> > 
> >
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 8a5203fb14ca..5d56e764fe43 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -341,4 +341,317 @@  __naked void precision_two_ids(void)
 	: __clobber_all);
 }
 
+/* Verify that check_ids() is used by regsafe() for scalars.
+ *
+ * r9 = ... some pointer with range X ...
+ * r6 = ... unbound scalar ID=a ...
+ * r7 = ... unbound scalar ID=b ...
+ * if (r6 > r7) goto +1
+ * r6 = r7
+ * if (r6 > X) goto exit
+ * r9 += r7
+ * *(u8 *)r9 = Y
+ *
+ * The memory access is safe only if r7 is bounded,
+ * which is true for one branch and not true for another.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe(void)
+{
+	asm volatile (
+	/* Bump allocated stack */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+	/* r9 = pointer to stack */
+	"r9 = r10;"
+	"r9 += -8;"
+	/* r7 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* r6 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	/* if r6 > r7 is an unpredictable jump */
+	"if r6 > r7 goto l1_%=;"
+	"r7 = r6;"
+"l1_%=:"
+	/* if r6 > 4 exit(0) */
+	"if r7 > 4 goto l2_%=;"
+	/* Access memory at r9[r7] */
+	"r9 += r6;"
+	"r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Similar to check_ids_in_regsafe.
+ * The l0 could be reached in two states:
+ *
+ *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
+ *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
+ *
+ * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
+ * This example would be considered safe without changes to
+ * mark_chain_precision() to track scalar values with equal IDs.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe_2(void)
+{
+	asm volatile (
+	/* Bump allocated stack */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+	/* r9 = pointer to stack */
+	"r9 = r10;"
+	"r9 += -8;"
+	/* r8 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r8 = r0;"
+	/* r7 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* r6 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	/* scratch .id from r0 */
+	"r0 = 0;"
+	/* if r6 > r7 is an unpredictable jump */
+	"if r6 > r7 goto l1_%=;"
+	/* tie r6 and r7 .id */
+	"r6 = r7;"
+"l0_%=:"
+	/* if r7 > 4 exit(0) */
+	"if r7 > 4 goto l2_%=;"
+	/* Access memory at r9[r7] */
+	"r9 += r6;"
+	"r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+	"r0 = 0;"
+	"exit;"
+"l1_%=:"
+	/* tie r6 and r8 .id */
+	"r6 = r8;"
+	"goto l0_%=;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that scalar IDs *are not* generated on register to register
+ * assignments if source register is a constant.
+ *
+ * If such IDs *are* generated the 'l1' below would be reached in
+ * two states:
+ *
+ *   (1) r1{.id=A}, r2{.id=A}
+ *   (2) r1{.id=C}, r2{.id=C}
+ *
+ * Thus forcing 'if r1 == r2' verification twice.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1d) if r3 == r4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	/* unpredictable jump */
+	"if r0 > 7 goto l0_%=;"
+	/* possibly generate same scalar ids for r3 and r4 */
+	"r1 = 0;"
+	"r1 = r1;"
+	"r3 = r1;"
+	"r4 = r1;"
+	"goto l1_%=;"
+"l0_%=:"
+	/* possibly generate different scalar ids for r3 and r4 */
+	"r1 = 0;"
+	"r2 = 0;"
+	"r3 = r1;"
+	"r4 = r2;"
+"l1_%=:"
+	/* predictable jump, marks r3 and r4 precise */
+	"if r3 == r4 goto +0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Same as no_scalar_id_for_const() but for 32-bit values */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1e) if w3 == w4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const32(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	/* unpredictable jump */
+	"if r0 > 7 goto l0_%=;"
+	/* possibly generate same scalar ids for r3 and r4 */
+	"w1 = 0;"
+	"w1 = w1;"
+	"w3 = w1;"
+	"w4 = w1;"
+	"goto l1_%=;"
+"l0_%=:"
+	/* possibly generate different scalar ids for r3 and r4 */
+	"w1 = 0;"
+	"w2 = 0;"
+	"w3 = w1;"
+	"w4 = w2;"
+"l1_%=:"
+	/* predictable jump, marks r1 and r2 precise */
+	"if w3 == w4 goto +0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has no id on r1
+ * - new state has a unique id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (57) r1 &= 255")
+__msg("8: (bf) r2 = r10")
+__msg("from 6 to 8: safe")
+__msg("processed 12 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_cur(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* r1.id == r0.id */
+	"r1 = r0;"
+	/* make r1.id unique */
+	"r0 = 0;"
+	"if r6 > 7 goto l0_%=;"
+	/* clear r1 id, but keep the range compatible */
+	"r1 &= 0xff;"
+"l0_%=:"
+	/* get here in two states:
+	 * - first: r1 has no id (cached state)
+	 * - second: r1 has a unique id (should be considered equivalent)
+	 */
+	"r2 = r10;"
+	"r2 += r1;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has a unique id on r1
+ * - new state has no id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (05) goto pc+1")
+__msg("9: (bf) r2 = r10")
+__msg("9: safe")
+__msg("processed 13 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_old(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* r1.id == r0.id */
+	"r1 = r0;"
+	/* make r1.id unique */
+	"r0 = 0;"
+	"if r6 > 7 goto l1_%=;"
+	"goto l0_%=;"
+"l1_%=:"
+	/* clear r1 id, but keep the range compatible */
+	"r1 &= 0xff;"
+"l0_%=:"
+	/* get here in two states:
+	 * - first: r1 has a unique id (cached state)
+	 * - second: r1 has no id (should be considered equivalent)
+	 */
+	"r2 = r10;"
+	"r2 += r1;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that two different scalar IDs in a verified state can't be
+ * mapped to the same scalar ID in current state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* The exit instruction should be reachable from two states,
+ * use two matches and "processed .. insns" to ensure this.
+ */
+__msg("13: (95) exit")
+__msg("13: (95) exit")
+__msg("processed 18 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void two_old_ids_one_cur_id(void)
+{
+	asm volatile (
+	/* Give unique scalar IDs to r{6,7} */
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	"r6 = r0;"
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	"r7 = r0;"
+	"r0 = 0;"
+	/* Maybe make r{6,7} IDs identical */
+	"if r6 > r7 goto l0_%=;"
+	"goto l1_%=;"
+"l0_%=:"
+	"r6 = r7;"
+"l1_%=:"
+	/* Mark r{6,7} precise.
+	 * Get here in two states:
+	 * - first:  r6{.id=A}, r7{.id=B} (cached state)
+	 * - second: r6{.id=A}, r7{.id=A}
+	 * Currently we don't want to consider such states equivalent.
+	 * Thus, marker instruction "r0 = r0;" would be verified twice.
+	 */
+	"r2 = r10;"
+	"r2 += r6;"
+	"r2 += r7;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";