
[v4,bpf-next,2/4] bpf: Recognize that two registers are safe when their ranges match

Message ID 20240302020010.95393-3-alexei.starovoitov@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf: Introduce may_goto and cond_break

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-43 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-45 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-47 success Logs for x86_64-llvm-18 / veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1060 this patch: 1060
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 7 maintainers not CCed: jolsa@kernel.org yonghong.song@linux.dev martin.lau@linux.dev song@kernel.org sdf@google.com kpsingh@kernel.org haoluo@google.com
netdev/build_clang success Errors and warnings before: 1066 this patch: 1066
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1077 this patch: 1077
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 98 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc

Commit Message

Alexei Starovoitov March 2, 2024, 2 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

When open-coded iterators, bpf_loop or may_goto are used, the following two states
are equivalent and it is safe to prune the search:

cur state: fp-8_w=scalar(id=3,smin=umin=smin32=umin32=2,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))
old state: fp-8_rw=scalar(id=2,smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))

In other words, the "exact" state match should ignore liveness and precision marks,
since the open-coded iterator logic has not completed their propagation yet,
but the range_within logic that applies to scalars, ptr_to_mem, map_value and pkt_ptr
is safe to rely on.

Avoid doing such a comparison when the regular infinite loop detection logic is
used, otherwise a bounded loop would be falsely flagged as an infinite loop.
One such example is not_an_inifinite_loop() in progs/verifier_loops1.c.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/verifier.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)
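
The range_within() rule the commit message relies on can be illustrated with a
reduced, standalone C sketch (not kernel code; struct bpf_reg_state is cut down
to its 64-bit bounds here, while the kernel's range_within() also checks the
32-bit bounds and var_off):

/* Pruning is safe when every value the current state allows is also
 * allowed by the already-verified old state.
 */
#include <stdbool.h>
#include <stdio.h>

struct reg_range {
	unsigned long long umin_value, umax_value;
	long long smin_value, smax_value;
};

static bool range_within(const struct reg_range *old, const struct reg_range *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value;
}

int main(void)
{
	/* old state: fp-8 in [1, 11], cur state: fp-8 in [2, 11], as in the log above */
	struct reg_range old = { 1, 11, 1, 11 };
	struct reg_range cur = { 2, 11, 2, 11 };

	printf("safe to prune: %d\n", range_within(&old, &cur));	/* prints 1 */
	return 0;
}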

Comments

Eduard Zingerman March 3, 2024, 2:12 p.m. UTC | #1
On Fri, 2024-03-01 at 18:00 -0800, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> When open code iterators, bpf_loop or may_goto is used the following two states
> are equivalent and safe to prune the search:
> 
> cur state: fp-8_w=scalar(id=3,smin=umin=smin32=umin32=2,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))
> old state: fp-8_rw=scalar(id=2,smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))
> 
> In other words "exact" state match should ignore liveness and precision marks,
> since open coded iterator logic didn't complete their propagation,
> but range_within logic that applies to scalars, ptr_to_mem, map_value, pkt_ptr
> is safe to rely on.
>
> Avoid doing such comparison when regular infinite loop detection logic is used,
> otherwise bounded loop logic will declare such "infinite loop" as false
> positive. Such example is in progs/verifier_loops1.c not_an_inifinite_loop().
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

I think this makes sense; I don't see counter-examples at the moment.
One nit below.

Also, I'm curious whether there is any veristat impact;
it could be huge for some cases with bpf_loop().

[...]

> @@ -17257,7 +17263,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
>  		 */
>  		loop_entry = get_loop_entry(&sl->state);
>  		force_exact = loop_entry && loop_entry->branches > 0;
> -		if (states_equal(env, &sl->state, cur, force_exact)) {
> +		if (states_equal(env, &sl->state, cur, force_exact ? EXACT : NOT_EXACT)) {

Logically this checks the same condition as the checks for calls_callback() or
is_iter_next_insn() above: whether the current state is equivalent to some
old state, where the old state has not yet been tracked to 'exit' for each
possible path.
Thus, the 'exact' flags used in these checks should be the same:
"force_exact ? RANGE_WITHIN : NOT_EXACT".

>  			if (force_exact)
>  				update_loop_entry(cur, loop_entry);
>  hit:
Alexei Starovoitov March 3, 2024, 5:21 p.m. UTC | #2
On Sun, Mar 3, 2024 at 6:12 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Fri, 2024-03-01 at 18:00 -0800, Alexei Starovoitov wrote:
> > From: Alexei Starovoitov <ast@kernel.org>
> >
> > When open code iterators, bpf_loop or may_goto is used the following two states
> > are equivalent and safe to prune the search:
> >
> > cur state: fp-8_w=scalar(id=3,smin=umin=smin32=umin32=2,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))
> > old state: fp-8_rw=scalar(id=2,smin=umin=smin32=umin32=1,smax=umax=smax32=umax32=11,var_off=(0x0; 0xf))
> >
> > In other words "exact" state match should ignore liveness and precision marks,
> > since open coded iterator logic didn't complete their propagation,
> > but range_within logic that applies to scalars, ptr_to_mem, map_value, pkt_ptr
> > is safe to rely on.
> >
> > Avoid doing such comparison when regular infinite loop detection logic is used,
> > otherwise bounded loop logic will declare such "infinite loop" as false
> > positive. Such example is in progs/verifier_loops1.c not_an_inifinite_loop().
> >
> > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
>
> I think this makes sense, don't see counter-examples at the moment.
> One nit below.
>
> Also, I'm curious if there is veristat results impact,
> could be huge for some cases with bpf_loop().

No doubt, there will be improvements here and there. For iters.bpf.o:
Insns (A)  Insns (B)  Insns  (DIFF)  States (A)  States (B)  States (DIFF)  Program
---------  ---------  -------------  ----------  ----------  -------------  -------------------------------
      858        823   -35 (-4.08%)          81          79    -2 (-2.47%)  iter_nested_iters
      137        110  -27 (-19.71%)          12          10   -2 (-16.67%)  iter_obfuscate_counter
     1161       1109   -52 (-4.48%)          92          88    -4 (-4.35%)  iter_subprog_iters
       62         35  -27 (-43.55%)           5           3   -2 (-40.00%)  triple_continue

the rest in this file didn't change. No change for pyperf either.

> [...]
>
> > @@ -17257,7 +17263,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
> >                */
> >               loop_entry = get_loop_entry(&sl->state);
> >               force_exact = loop_entry && loop_entry->branches > 0;
> > -             if (states_equal(env, &sl->state, cur, force_exact)) {
> > +             if (states_equal(env, &sl->state, cur, force_exact ? EXACT : NOT_EXACT)) {
>
> Logically this checks same condition as checks for calls_callback() or
> is_iter_next_insn() above: whether current state is equivalent to some
> old state, where old state had not been tracked to 'exit' for each
> possible path yet.
> Thus, 'exact' flags used in these checks should be the same:
> "force_exact ? RANGE_WITHIN : NOT_EXACT".

Good point. Will change. It should help as well.
Alexei Starovoitov March 5, 2024, 3:44 a.m. UTC | #3
On Sun, Mar 3, 2024 at 9:21 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
> > >               loop_entry = get_loop_entry(&sl->state);
> > >               force_exact = loop_entry && loop_entry->branches > 0;
> > > -             if (states_equal(env, &sl->state, cur, force_exact)) {
> > > +             if (states_equal(env, &sl->state, cur, force_exact ? EXACT : NOT_EXACT)) {
> >
> > Logically this checks same condition as checks for calls_callback() or
> > is_iter_next_insn() above: whether current state is equivalent to some
> > old state, where old state had not been tracked to 'exit' for each
> > possible path yet.
> > Thus, 'exact' flags used in these checks should be the same:
> > "force_exact ? RANGE_WITHIN : NOT_EXACT".
>
> Good point. Will change. It should help as well.

While working on that suggestion I realized that this patch
has a bug, which I'm fixing with an extra hunk:

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 410ac8423cf8..1e823c40a6d2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -16471,11 +16471,9 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
        if (!(rold->live & REG_LIVE_READ) && exact != RANGE_WITHIN)
                /* explored state didn't use this */
                return true;
-       if (rold->type == NOT_INIT)
+       if (rold->type == NOT_INIT && exact != RANGE_WITHIN)
                /* explored state can't have used this */
                return true;
-       if (rcur->type == NOT_INIT)
-               return false;

Kinda obvious in retrospect: the NOT_INIT shortcut relies on the same
"explored state can't have used this" reasoning as the liveness check,
which RANGE_WITHIN deliberately doesn't trust, so it has to be skipped
for RANGE_WITHIN as well.
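
Put together, a reduced standalone sketch of how regsafe() picks between the
three levels once this extra hunk is folded in (illustrative only; the struct
and helpers below are simplified stand-ins for the kernel's bpf_reg_state,
regs_exact() and the type-specific checks):

#include <stdbool.h>

enum exact_level { NOT_EXACT, EXACT, RANGE_WITHIN };

struct reg {
	bool live_read;		/* REG_LIVE_READ reached the old state */
	bool not_init;		/* register type is NOT_INIT */
	/* value bounds etc. elided */
};

/* stand-in for the kernel's regs_exact() */
bool regs_exact_sketch(const struct reg *rold, const struct reg *rcur)
{
	return rold->not_init == rcur->not_init;
}

bool regsafe_sketch(const struct reg *rold, const struct reg *rcur,
		    enum exact_level exact)
{
	if (exact == EXACT)
		return regs_exact_sketch(rold, rcur);
	if (!rold->live_read && exact != RANGE_WITHIN)
		return true;	/* explored state didn't use this */
	if (rold->not_init && exact != RANGE_WITHIN)
		return true;	/* explored state can't have used this */
	/* otherwise fall through to the type-specific checks:
	 * range_within() for scalars, ptr_to_mem, map_value, pkt_ptr, ...
	 */
	return regs_exact_sketch(rold, rcur);
}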

Patch

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 63ef6a38726e..49ff76543adc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7830,6 +7830,11 @@  static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
 }
 
 static void reset_idmap_scratch(struct bpf_verifier_env *env);
+enum exact_level {
+	NOT_EXACT,
+	EXACT,
+	RANGE_WITHIN
+};
 static bool regs_exact(const struct bpf_reg_state *rold,
 		       const struct bpf_reg_state *rcur,
 		       struct bpf_idmap *idmap);
@@ -16286,8 +16291,8 @@  static int check_btf_info(struct bpf_verifier_env *env,
 }
 
 /* check %cur's range satisfies %old's */
-static bool range_within(struct bpf_reg_state *old,
-			 struct bpf_reg_state *cur)
+static bool range_within(const struct bpf_reg_state *old,
+			 const struct bpf_reg_state *cur)
 {
 	return old->umin_value <= cur->umin_value &&
 	       old->umax_value >= cur->umax_value &&
@@ -16453,12 +16458,13 @@  static bool regs_exact(const struct bpf_reg_state *rold,
 
 /* Returns true if (rold safe implies rcur safe) */
 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
-		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact)
+		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
+		    enum exact_level exact)
 {
-	if (exact)
+	if (exact == EXACT)
 		return regs_exact(rold, rcur, idmap);
 
-	if (!(rold->live & REG_LIVE_READ))
+	if (!(rold->live & REG_LIVE_READ) && exact != RANGE_WITHIN)
 		/* explored state didn't use this */
 		return true;
 	if (rold->type == NOT_INIT)
@@ -16500,7 +16506,7 @@  static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 			return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
 			       check_scalar_ids(rold->id, rcur->id, idmap);
 		}
-		if (!rold->precise)
+		if (!rold->precise && exact != RANGE_WITHIN)
 			return true;
 		/* Why check_ids() for scalar registers?
 		 *
@@ -16611,7 +16617,7 @@  static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
 }
 
 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
-		      struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
+		      struct bpf_func_state *cur, struct bpf_idmap *idmap, enum exact_level exact)
 {
 	int i, spi;
 
@@ -16775,7 +16781,7 @@  static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
  * the current state will reach 'bpf_exit' instruction safely
  */
 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
-			      struct bpf_func_state *cur, bool exact)
+			      struct bpf_func_state *cur, enum exact_level exact)
 {
 	int i;
 
@@ -16802,7 +16808,7 @@  static void reset_idmap_scratch(struct bpf_verifier_env *env)
 static bool states_equal(struct bpf_verifier_env *env,
 			 struct bpf_verifier_state *old,
 			 struct bpf_verifier_state *cur,
-			 bool exact)
+			 enum exact_level exact)
 {
 	int i;
 
@@ -17182,7 +17188,7 @@  static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			 * => unsafe memory access at 11 would not be caught.
 			 */
 			if (is_iter_next_insn(env, insn_idx) || is_may_goto_insn(env, insn_idx)) {
-				if (states_equal(env, &sl->state, cur, true)) {
+				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
 					struct bpf_reg_state *iter_state;
 
 					iter_state = get_iter_reg(env, cur, insn_idx);
@@ -17194,13 +17200,13 @@  static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 				goto skip_inf_loop_check;
 			}
 			if (calls_callback(env, insn_idx)) {
-				if (states_equal(env, &sl->state, cur, true))
+				if (states_equal(env, &sl->state, cur, RANGE_WITHIN))
 					goto hit;
 				goto skip_inf_loop_check;
 			}
 			/* attempt to detect infinite loop to avoid unnecessary doomed work */
 			if (states_maybe_looping(&sl->state, cur) &&
-			    states_equal(env, &sl->state, cur, true) &&
+			    states_equal(env, &sl->state, cur, EXACT) &&
 			    !iter_active_depths_differ(&sl->state, cur) &&
 			    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
 				verbose_linfo(env, insn_idx, "; ");
@@ -17257,7 +17263,7 @@  static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		 */
 		loop_entry = get_loop_entry(&sl->state);
 		force_exact = loop_entry && loop_entry->branches > 0;
-		if (states_equal(env, &sl->state, cur, force_exact)) {
+		if (states_equal(env, &sl->state, cur, force_exact ? EXACT : NOT_EXACT)) {
 			if (force_exact)
 				update_loop_entry(cur, loop_entry);
 hit: