[v2,bpf-next,03/10] bpf: enforce exact retval range on subprog/callback exit

Message ID 20231129003620.1049610-4-andrii@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Series BPF verifier retval logic fixes

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next
netdev/apply fail Patch does not apply to bpf-next
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-3 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-15 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-16 / build / build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-llvm-16 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-llvm-16 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-16 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-16 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-16 / veristat

Commit Message

Andrii Nakryiko Nov. 29, 2023, 12:36 a.m. UTC
Instead of relying on the potentially imprecise tnum representation of the
expected return value range for callbacks and subprogs, validate that the
umin/umax range satisfies the exact expected range of return values.

E.g., if a callback needs to return values in the [0, 2] range, a tnum can't
represent this precisely and will instead allow the [0, 3] range. By checking
the umin/umax range, we can make sure that the subprog/callback indeed
returns only the valid [0, 2] range.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 include/linux/bpf_verifier.h |  7 ++++++-
 kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
 2 files changed, 35 insertions(+), 12 deletions(-)
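
To make the imprecision concrete, here is a small standalone userspace sketch
(not the kernel sources; struct tnum and the helpers below are simplified
re-implementations for illustration only) showing that the tightest tnum
covering [0, 2] also admits 3, which is exactly what the exact umin/umax check
rules out:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's struct tnum */
struct tnum { uint64_t value; uint64_t mask; };

/* simplified tnum_range(): tightest tnum covering [min, max]
 * (omits the kernel's bits > 63 special case)
 */
static struct tnum tnum_range(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits;

	if (!chi)
		return (struct tnum){ min, 0 };		/* i.e. tnum_const(min) */
	bits = 64 - __builtin_clzll(chi);		/* fls64(chi) */
	delta = (1ULL << bits) - 1;
	return (struct tnum){ min & ~delta, delta };
}

/* does the tnum admit the constant x? */
static int tnum_admits(struct tnum t, uint64_t x)
{
	return (x & ~t.mask) == t.value;
}

int main(void)
{
	struct tnum r = tnum_range(0, 2);	/* value = 0, mask = 3 */
	uint64_t v;

	for (v = 0; v <= 4; v++)
		printf("%llu: tnum %s, exact [0, 2] %s\n",
		       (unsigned long long)v,
		       tnum_admits(r, v) ? "admits" : "rejects",
		       v <= 2 ? "admits" : "rejects");
	/* v = 3 shows the gap: the tnum admits it, the exact range does not */
	return 0;
}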

Comments

Andrii Nakryiko Nov. 29, 2023, 3:40 a.m. UTC | #1
On Tue, Nov 28, 2023 at 4:36 PM Andrii Nakryiko <andrii@kernel.org> wrote:
>
> Instead of relying on potentially imprecise tnum representation of
> expected return value range for callbacks and subprogs, validate that
> umin/umax range satisfy exact expected range of return values.
>
> E.g., if callback would need to return [0, 2] range, tnum can't
> represent this precisely and instead will allow [0, 3] range. By
> checking umin/umax range, we can make sure that subprog/callback indeed
> returns only valid [0, 2] range.
>
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  include/linux/bpf_verifier.h |  7 ++++++-
>  kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
>  2 files changed, 35 insertions(+), 12 deletions(-)
>

[...]

> @@ -9531,7 +9536,7 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
>         __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
>         __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
>         callee->in_callback_fn = true;
> -       callee->callback_ret_range = tnum_range(0, 1);
> +       callee->callback_ret_range = retval_range(0, 1);
>         return 0;
>  }
>
> @@ -9560,6 +9565,19 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
>         return is_rbtree_lock_required_kfunc(kfunc_btf_id);
>  }
>
> +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> +{
> +       return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;

argh, I didn't update the core piece of logic to use smin/smax here.

I'll send v3 tomorrow, sorry for the spam...

> +}
> +
> +static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
> +{
> +       if (range.minval == range.maxval)
> +               return tnum_const(range.minval);
> +       else
> +               return tnum_range(range.minval, range.maxval);
> +}
> +

[...]
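For context on the smin/smax slip noted above: struct bpf_retval_range holds
signed s32 bounds, so the containment check presumably has to compare against
the register's signed smin_value/smax_value rather than umin/umax. A hedged
sketch of what the corrected helper might look like (the actual v3 change may
differ in details):

static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
{
	/* hypothetical signed-bounds variant, not the committed v3 code */
	return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
}
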
Shung-Hsi Yu Nov. 29, 2023, 10:55 a.m. UTC | #2
On Tue, Nov 28, 2023 at 04:36:13PM -0800, Andrii Nakryiko wrote:
> Instead of relying on potentially imprecise tnum representation of
> expected return value range for callbacks and subprogs, validate that
> umin/umax range satisfy exact expected range of return values.
> 
> E.g., if callback would need to return [0, 2] range, tnum can't
> represent this precisely and instead will allow [0, 3] range. By
> checking umin/umax range, we can make sure that subprog/callback indeed
> returns only valid [0, 2] range.
> 
> Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  include/linux/bpf_verifier.h |  7 ++++++-
>  kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
>  2 files changed, 35 insertions(+), 12 deletions(-)

...

> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -9560,6 +9565,19 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
>  	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
>  }
>  
> +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> +{
> +	return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> +}
> +
> +static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
> +{
> +	if (range.minval == range.maxval)
> +		return tnum_const(range.minval);
> +	else
> +		return tnum_range(range.minval, range.maxval);
> +}

Nit: I find it slightly strange to have retval_range_as_tnum() added here
(patch 3), only to be removed again in patch 5. As far as I can see patch 4
doesn't require it, and it is only used once.

Perhaps just replace its use below with tnum_range() instead? (Not
pretty, but will be removed anyway).

> @@ -9597,7 +9612,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
>  		if (err)
>  			return err;
>  
> -		if (!tnum_in(range, r0->var_off)) {
> +		/* enforce R0 return value range */
> +		if (!retval_range_within(callee->callback_ret_range, r0)) {
> +			struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
Andrii Nakryiko Nov. 29, 2023, 4:23 p.m. UTC | #3
On Wed, Nov 29, 2023 at 2:56 AM Shung-Hsi Yu <shung-hsi.yu@suse.com> wrote:
>
> On Tue, Nov 28, 2023 at 04:36:13PM -0800, Andrii Nakryiko wrote:
> > Instead of relying on potentially imprecise tnum representation of
> > expected return value range for callbacks and subprogs, validate that
> > umin/umax range satisfy exact expected range of return values.
> >
> > E.g., if callback would need to return [0, 2] range, tnum can't
> > represent this precisely and instead will allow [0, 3] range. By
> > checking umin/umax range, we can make sure that subprog/callback indeed
> > returns only valid [0, 2] range.
> >
> > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
> >  include/linux/bpf_verifier.h |  7 ++++++-
> >  kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
> >  2 files changed, 35 insertions(+), 12 deletions(-)
>
> ...
>
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -9560,6 +9565,19 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> >       return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> >  }
> >
> > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > +{
> > +     return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > +}
> > +
> > +static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
> > +{
> > +     if (range.minval == range.maxval)
> > +             return tnum_const(range.minval);
> > +     else
> > +             return tnum_range(range.minval, range.maxval);
> > +}
>
> Nit: find it slightly strange to have retval_range_as_tnum() added here
> (patch 3), only to be removed again in the patch 5. As far as I can see
> patch 4 doesn't require this, and it is only used once.
>
> Perhaps just replace its use below with tnum_range() instead? (Not
> pretty, but will be removed anyway).
>

I do this to delay the refactoring of verbose_invalid_scalar(), which is
used by another piece of logic that I refactor in a separate patch. If I
don't add this temporary retval_range_as_tnum() helper, I might need to
update some more tests that expect an exact var_off value in logs, and I
didn't want to do that. Given it's a trivial helper, it doesn't feel like a
big deal to keep it for a patch or two before completing the refactoring.



> > @@ -9597,7 +9612,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
> >               if (err)
> >                       return err;
> >
> > -             if (!tnum_in(range, r0->var_off)) {
> > +             /* enforce R0 return value range */
> > +             if (!retval_range_within(callee->callback_ret_range, r0)) {
> > +                     struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
Shung-Hsi Yu Nov. 30, 2023, 5:23 a.m. UTC | #4
On Wed, Nov 29, 2023 at 08:23:38AM -0800, Andrii Nakryiko wrote:
> On Wed, Nov 29, 2023 at 2:56 AM Shung-Hsi Yu <shung-hsi.yu@suse.com> wrote:
> > On Tue, Nov 28, 2023 at 04:36:13PM -0800, Andrii Nakryiko wrote:
> > > Instead of relying on potentially imprecise tnum representation of
> > > expected return value range for callbacks and subprogs, validate that
> > > umin/umax range satisfy exact expected range of return values.
> > >
> > > E.g., if callback would need to return [0, 2] range, tnum can't
> > > represent this precisely and instead will allow [0, 3] range. By
> > > checking umin/umax range, we can make sure that subprog/callback indeed
> > > returns only valid [0, 2] range.
> > >
> > > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > > ---
> > >  include/linux/bpf_verifier.h |  7 ++++++-
> > >  kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
> > >  2 files changed, 35 insertions(+), 12 deletions(-)
> >
> > ...
> >
> > > --- a/kernel/bpf/verifier.c
> > > +++ b/kernel/bpf/verifier.c
> > > @@ -9560,6 +9565,19 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> > >       return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> > >  }
> > >
> > > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > > +{
> > > +     return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > > +}
> > > +
> > > +static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
> > > +{
> > > +     if (range.minval == range.maxval)
> > > +             return tnum_const(range.minval);
> > > +     else
> > > +             return tnum_range(range.minval, range.maxval);
> > > +}
> >
> > Nit: find it slightly strange to have retval_range_as_tnum() added here
> > (patch 3), only to be removed again in the patch 5. As far as I can see
> > patch 4 doesn't require this, and it is only used once.
> >
> > Perhaps just replace its use below with tnum_range() instead? (Not
> > pretty, but will be removed anyway).
> 
> I do this to delay the refactoring of verbose_invalid_scalar() which
> is used by another piece of logic which I refactor in a separate
> patch. If I don't do this temporary retval_range_as_tnum() helper, I
> might need to update some more tests that expect exact var_off value
> in logs, and I didn't want to do it. Given it's a trivial helper, it
> feels like it's not a big deal to keep it for a patch or two before
> completing the refactoring.

Replace retval_range_as_tnum(callee->callback_ret_range) with

  tnum_range(callee->callback_ret_range.minval,
             callee->callback_ret_range.maxval)

and the verbose_invalid_scalar() signature stays the same; there are also no
var_off changes, because this is just a manual inline of retval_range_as_tnum()
(tnum_range(n, n) == tnum_const(n)).

Agree it really is not a big deal, so I won't insist on it.

> > > @@ -9597,7 +9612,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
> > >               if (err)
> > >                       return err;
> > >
> > > -             if (!tnum_in(range, r0->var_off)) {
> > > +             /* enforce R0 return value range */
> > > +             if (!retval_range_within(callee->callback_ret_range, r0)) {
> > > +                     struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
Andrii Nakryiko Nov. 30, 2023, 6:41 a.m. UTC | #5
On Wed, Nov 29, 2023 at 9:23 PM Shung-Hsi Yu <shung-hsi.yu@suse.com> wrote:
>
> On Wed, Nov 29, 2023 at 08:23:38AM -0800, Andrii Nakryiko wrote:
> > On Wed, Nov 29, 2023 at 2:56 AM Shung-Hsi Yu <shung-hsi.yu@suse.com> wrote:
> > > On Tue, Nov 28, 2023 at 04:36:13PM -0800, Andrii Nakryiko wrote:
> > > > Instead of relying on potentially imprecise tnum representation of
> > > > expected return value range for callbacks and subprogs, validate that
> > > > umin/umax range satisfy exact expected range of return values.
> > > >
> > > > E.g., if callback would need to return [0, 2] range, tnum can't
> > > > represent this precisely and instead will allow [0, 3] range. By
> > > > checking umin/umax range, we can make sure that subprog/callback indeed
> > > > returns only valid [0, 2] range.
> > > >
> > > > Acked-by: Eduard Zingerman <eddyz87@gmail.com>
> > > > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > > > ---
> > > >  include/linux/bpf_verifier.h |  7 ++++++-
> > > >  kernel/bpf/verifier.c        | 40 ++++++++++++++++++++++++++----------
> > > >  2 files changed, 35 insertions(+), 12 deletions(-)
> > >
> > > ...
> > >
> > > > --- a/kernel/bpf/verifier.c
> > > > +++ b/kernel/bpf/verifier.c
> > > > @@ -9560,6 +9565,19 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
> > > >       return is_rbtree_lock_required_kfunc(kfunc_btf_id);
> > > >  }
> > > >
> > > > +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
> > > > +{
> > > > +     return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
> > > > +}
> > > > +
> > > > +static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
> > > > +{
> > > > +     if (range.minval == range.maxval)
> > > > +             return tnum_const(range.minval);
> > > > +     else
> > > > +             return tnum_range(range.minval, range.maxval);
> > > > +}
> > >
> > > Nit: find it slightly strange to have retval_range_as_tnum() added here
> > > (patch 3), only to be removed again in the patch 5. As far as I can see
> > > patch 4 doesn't require this, and it is only used once.
> > >
> > > Perhaps just replace its use below with tnum_range() instead? (Not
> > > pretty, but will be removed anyway).
> >
> > I do this to delay the refactoring of verbose_invalid_scalar() which
> > is used by another piece of logic which I refactor in a separate
> > patch. If I don't do this temporary retval_range_as_tnum() helper, I
> > might need to update some more tests that expect exact var_off value
> > in logs, and I didn't want to do it. Given it's a trivial helper, it
> > feels like it's not a big deal to keep it for a patch or two before
> > completing the refactoring.
>
> Replace retval_range_as_tnum(callee->callback_ret_range) with
>
>   tnum_range(callee->callback_ret_range.minval,
>              callee->callback_ret_range.maxval)
>
> and the verbose_invalid_scalar() signature stays the same; also no var_off
> changes because it is just manual inline of retval_range_as_tnum(), as
> tnum_range(n, n) == tnum_const(n).

I tried it locally, and I don't have to adjust any new tests, so I'll
inline tnum_range() as you suggested, thanks.


>
> Agree it really is not a big deal, so I won't insist on it.
>
> > > > @@ -9597,7 +9612,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
> > > >               if (err)
> > > >                       return err;
> > > >
> > > > -             if (!tnum_in(range, r0->var_off)) {
> > > > +             /* enforce R0 return value range */
> > > > +             if (!retval_range_within(callee->callback_ret_range, r0)) {
> > > > +                     struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
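
For reference, the inlined form agreed on above would presumably turn the
prepare_func_exit() hunk into something like the following (a sketch of
Shung-Hsi's suggestion, not the actual v3 hunk):

		/* enforce R0 return value range */
		if (!retval_range_within(callee->callback_ret_range, r0)) {
			struct tnum range = tnum_range(callee->callback_ret_range.minval,
						       callee->callback_ret_range.maxval);

			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
			return -EINVAL;
		}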

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 0c0e1bccad45..3378cc753061 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -275,6 +275,11 @@  struct bpf_reference_state {
 	int callback_ref;
 };
 
+struct bpf_retval_range {
+	s32 minval;
+	s32 maxval;
+};
+
 /* state of the program:
  * type of all registers and stack info
  */
@@ -297,7 +302,7 @@  struct bpf_func_state {
 	 * void foo(void) { bpf_timer_set_callback(,foo); }
 	 */
 	u32 async_entry_cnt;
-	struct tnum callback_ret_range;
+	struct bpf_retval_range callback_ret_range;
 	bool in_callback_fn;
 	bool in_async_callback_fn;
 	bool in_exception_callback_fn;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 849fbf47b5f3..845f46f40e6b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2305,6 +2305,11 @@  static void init_reg_state(struct bpf_verifier_env *env,
 	regs[BPF_REG_FP].frameno = state->frameno;
 }
 
+static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
+{
+	return (struct bpf_retval_range){ minval, maxval };
+}
+
 #define BPF_MAIN_FUNC (-1)
 static void init_func_state(struct bpf_verifier_env *env,
 			    struct bpf_func_state *state,
@@ -2313,7 +2318,7 @@  static void init_func_state(struct bpf_verifier_env *env,
 	state->callsite = callsite;
 	state->frameno = frameno;
 	state->subprogno = subprogno;
-	state->callback_ret_range = tnum_range(0, 0);
+	state->callback_ret_range = retval_range(0, 0);
 	init_reg_state(env, state);
 	mark_verifier_state_scratched(env);
 }
@@ -9396,7 +9401,7 @@  static int set_map_elem_callback_state(struct bpf_verifier_env *env,
 		return err;
 
 	callee->in_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9418,7 +9423,7 @@  static int set_loop_callback_state(struct bpf_verifier_env *env,
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 
 	callee->in_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9448,7 +9453,7 @@  static int set_timer_callback_state(struct bpf_verifier_env *env,
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 	callee->in_async_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9476,7 +9481,7 @@  static int set_find_vma_callback_state(struct bpf_verifier_env *env,
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 	callee->in_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9499,7 +9504,7 @@  static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 
 	callee->in_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9531,7 +9536,7 @@  static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
 	callee->in_callback_fn = true;
-	callee->callback_ret_range = tnum_range(0, 1);
+	callee->callback_ret_range = retval_range(0, 1);
 	return 0;
 }
 
@@ -9560,6 +9565,19 @@  static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
 	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
 }
 
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
+{
+	return range.minval <= reg->umin_value && reg->umax_value <= range.maxval;
+}
+
+static struct tnum retval_range_as_tnum(struct bpf_retval_range range)
+{
+	if (range.minval == range.maxval)
+		return tnum_const(range.minval);
+	else
+		return tnum_range(range.minval, range.maxval);
+}
+
 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 {
 	struct bpf_verifier_state *state = env->cur_state, *prev_st;
@@ -9583,9 +9601,6 @@  static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 
 	caller = state->frame[state->curframe - 1];
 	if (callee->in_callback_fn) {
-		/* enforce R0 return value range [0, 1]. */
-		struct tnum range = callee->callback_ret_range;
-
 		if (r0->type != SCALAR_VALUE) {
 			verbose(env, "R0 not a scalar value\n");
 			return -EACCES;
@@ -9597,7 +9612,10 @@  static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 		if (err)
 			return err;
 
-		if (!tnum_in(range, r0->var_off)) {
+		/* enforce R0 return value range */
+		if (!retval_range_within(callee->callback_ret_range, r0)) {
+			struct tnum range = retval_range_as_tnum(callee->callback_ret_range);
+
 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
 			return -EINVAL;
 		}