[bpf-next,v4,04/12] bpf: Invalidate slices on destruction of dynptrs on stack

Message ID 20230120070355.1983560-5-memxor@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series Dynptr fixes

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 fail Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 66 this patch: 66
netdev/cc_maintainers warning 11 maintainers not CCed: linux-kselftest@vger.kernel.org kpsingh@kernel.org haoluo@google.com song@kernel.org yhs@fb.com martin.lau@linux.dev sdf@google.com john.fastabend@gmail.com shuah@kernel.org jolsa@kernel.org mykolal@fb.com
netdev/build_clang success Errors and warnings before: 13 this patch: 13
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 66 this patch: 66
netdev/checkpatch warning WARNING: line length of 110 exceeds 80 columns WARNING: line length of 81 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Kumar Kartikeya Dwivedi Jan. 20, 2023, 7:03 a.m. UTC
The previous commit implemented destroy_if_dynptr_stack_slot. It
destroys the dynptr that the given spi belongs to, but still doesn't
invalidate the slices that belong to such a dynptr. While we disallow
overwriting referenced dynptrs and return an error early in that case,
we do allow the overwrite for unreferenced dynptrs and destroy them.

To enable precise and scoped invalidation of dynptr slices in this
case, we must be able to associate each slice obtained using
bpf_dynptr_data with its source dynptr. During destruction, only slices
belonging to the dynptr being destroyed should be invalidated, and
nothing else. Currently, dynptr slices belonging to different dynptrs
are indistinguishable.
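
For example, in the sketch below (dynptr_fail.c-style code; the buffers
and program name are made up purely for illustration), data1 and data2
end up as identical-looking PTR_TO_MEM registers, with nothing tying
them back to ptr1 and ptr2 respectively:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Global buffers are map-value backed, as bpf_dynptr_from_mem() requires. */
  char buf1[16], buf2[16];

  char _license[] SEC("license") = "GPL";

  SEC("?raw_tp")
  int two_slices(void *ctx)
  {
  	struct bpf_dynptr ptr1, ptr2;
  	char *data1, *data2;

  	bpf_dynptr_from_mem(buf1, sizeof(buf1), 0, &ptr1);
  	bpf_dynptr_from_mem(buf2, sizeof(buf2), 0, &ptr2);

  	data1 = bpf_dynptr_data(&ptr1, 0, 8);
  	data2 = bpf_dynptr_data(&ptr2, 0, 8);
  	if (!data1 || !data2)
  		return 0;

  	/* Without a per-slice dynptr_id, destroying ptr1 could not
  	 * selectively invalidate data1 while leaving data2 usable.
  	 */
  	return data1[0] + data2[0];
  }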

Hence, assign a unique id to each dynptr (both CONST_PTR_TO_DYNPTR and
those on the stack), stored as part of reg->id. Whenever bpf_dynptr_data
is used, transfer this unique dynptr id to the returned
PTR_TO_MEM_OR_NULL slice pointer and store it in a new per-PTR_TO_MEM
dynptr_id register state member.

Finally, with this relationship between dynptrs and their slices
established, implement precise invalidation logic in
destroy_if_dynptr_stack_slot that only invalidates slices belonging to
the destroyed dynptr.
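
As an illustration of what this lets the verifier reject, consider the
following sketch in dynptr_fail.c style (the program name, buffer and
expected message are illustrative and not selftests added by this
series):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_misc.h"

  char buf[16];

  char _license[] SEC("license") = "GPL";

  SEC("?raw_tp")
  __failure __msg("invalid mem access")
  int slice_use_after_destroy(void *ctx)
  {
  	struct bpf_dynptr ptr;
  	char *data;

  	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);

  	/* data inherits ptr's unique dynptr_id via bpf_dynptr_data(). */
  	data = bpf_dynptr_data(&ptr, 0, 8);
  	if (!data)
  		return 0;

  	/* Scribbling over the stack slots of the unreferenced dynptr
  	 * destroys it (previous commit); destroy_if_dynptr_stack_slot()
  	 * now also invalidates every register carrying this dynptr's id.
  	 * volatile keeps clang from eliminating the dead store.
  	 */
  	*(volatile __u64 *)&ptr = 0;

  	/* Rejected: the slice's backing dynptr no longer exists. */
  	return data[0];
  }

A slice obtained from a different, still-live dynptr keeps its own
dynptr_id and therefore stays usable across such an overwrite.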

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/bpf_verifier.h                  |  5 +-
 kernel/bpf/verifier.c                         | 74 ++++++++++++++++---
 .../testing/selftests/bpf/progs/dynptr_fail.c |  4 +-
 3 files changed, 68 insertions(+), 15 deletions(-)

Comments

Joanne Koong Jan. 20, 2023, 9:45 p.m. UTC | #1
On Thu, Jan 19, 2023 at 11:04 PM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
> [...]

Acked-by: Joanne Koong <joannelkoong@gmail.com>

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 127058cfec47..aa83de1fe755 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -70,7 +70,10 @@  struct bpf_reg_state {
 			u32 btf_id;
 		};
 
-		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+			u32 mem_size;
+			u32 dynptr_id; /* for dynptr slices */
+		};
 
 		/* For dynptr stack slots */
 		struct {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5c7f29ca94ec..01cb802776fd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -255,6 +255,7 @@  struct bpf_call_arg_meta {
 	int mem_size;
 	u64 msize_max_value;
 	int ref_obj_id;
+	int dynptr_id;
 	int map_uid;
 	int func_id;
 	struct btf *btf;
@@ -750,23 +751,27 @@  static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
 
 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
 			      enum bpf_dynptr_type type,
-			      bool first_slot);
+			      bool first_slot, int dynptr_id);
 
 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
 				struct bpf_reg_state *reg);
 
-static void mark_dynptr_stack_regs(struct bpf_reg_state *sreg1,
+static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
+				   struct bpf_reg_state *sreg1,
 				   struct bpf_reg_state *sreg2,
 				   enum bpf_dynptr_type type)
 {
-	__mark_dynptr_reg(sreg1, type, true);
-	__mark_dynptr_reg(sreg2, type, false);
+	int id = ++env->id_gen;
+
+	__mark_dynptr_reg(sreg1, type, true, id);
+	__mark_dynptr_reg(sreg2, type, false, id);
 }
 
-static void mark_dynptr_cb_reg(struct bpf_reg_state *reg,
+static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
+			       struct bpf_reg_state *reg,
 			       enum bpf_dynptr_type type)
 {
-	__mark_dynptr_reg(reg, type, true);
+	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
 }
 
 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
@@ -795,7 +800,7 @@  static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 	if (type == BPF_DYNPTR_TYPE_INVALID)
 		return -EINVAL;
 
-	mark_dynptr_stack_regs(&state->stack[spi].spilled_ptr,
+	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
 			       &state->stack[spi - 1].spilled_ptr, type);
 
 	if (dynptr_type_refcounted(type)) {
@@ -871,7 +876,9 @@  static void __mark_reg_unknown(const struct bpf_verifier_env *env,
 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
 				        struct bpf_func_state *state, int spi)
 {
-	int i;
+	struct bpf_func_state *fstate;
+	struct bpf_reg_state *dreg;
+	int i, dynptr_id;
 
 	/* We always ensure that STACK_DYNPTR is never set partially,
 	 * hence just checking for slot_type[0] is enough. This is
@@ -899,7 +906,19 @@  static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
 	}
 
-	/* TODO: Invalidate any slices associated with this dynptr */
+	dynptr_id = state->stack[spi].spilled_ptr.id;
+	/* Invalidate any slices associated with this dynptr */
+	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
+		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
+		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
+			continue;
+		if (dreg->dynptr_id == dynptr_id) {
+			if (!env->allow_ptr_leaks)
+				__mark_reg_not_init(env, dreg);
+			else
+				__mark_reg_unknown(env, dreg);
+		}
+	}));
 
 	/* Do not release reference state, we are destroying dynptr on stack,
 	 * not using some helper to release it. Just reset register.
@@ -1562,7 +1581,7 @@  static void mark_reg_known_zero(struct bpf_verifier_env *env,
 }
 
 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
-			      bool first_slot)
+			      bool first_slot, int dynptr_id)
 {
 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
@@ -1570,6 +1589,8 @@  static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type ty
 	 */
 	__mark_reg_known_zero(reg);
 	reg->type = CONST_PTR_TO_DYNPTR;
+	/* Give each dynptr a unique id to uniquely associate slices to it. */
+	reg->id = dynptr_id;
 	reg->dynptr.type = type;
 	reg->dynptr.first_slot = first_slot;
 }
@@ -6532,6 +6553,19 @@  int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	}
 }
 
+static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi;
+
+	if (reg->type == CONST_PTR_TO_DYNPTR)
+		return reg->id;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;
+	return state->stack[spi].spilled_ptr.id;
+}
+
 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
 	struct bpf_func_state *state = func(env, reg);
@@ -7601,7 +7635,7 @@  static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
 	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
 	 */
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
-	mark_dynptr_cb_reg(&callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
+	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
 
 	/* unused */
@@ -8107,18 +8141,31 @@  static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
 			if (arg_type_is_dynptr(fn->arg_type[i])) {
 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
-				int ref_obj_id;
+				int id, ref_obj_id;
+
+				if (meta.dynptr_id) {
+					verbose(env, "verifier internal error: meta.dynptr_id already set\n");
+					return -EFAULT;
+				}
 
 				if (meta.ref_obj_id) {
 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
 					return -EFAULT;
 				}
 
+				id = dynptr_id(env, reg);
+				if (id < 0) {
+					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
+					return id;
+				}
+
 				ref_obj_id = dynptr_ref_obj_id(env, reg);
 				if (ref_obj_id < 0) {
 					verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
 					return ref_obj_id;
 				}
+
+				meta.dynptr_id = id;
 				meta.ref_obj_id = ref_obj_id;
 				break;
 			}
@@ -8275,6 +8322,9 @@  static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		return -EFAULT;
 	}
 
+	if (is_dynptr_ref_function(func_id))
+		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
+
 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 9dc3f23a8270..e43000c63c66 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -67,7 +67,7 @@  static int get_map_val_dynptr(struct bpf_dynptr *ptr)
  * bpf_ringbuf_submit/discard_dynptr call
  */
 SEC("?raw_tp")
-__failure __msg("Unreleased reference id=1")
+__failure __msg("Unreleased reference id=2")
 int ringbuf_missing_release1(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -80,7 +80,7 @@  int ringbuf_missing_release1(void *ctx)
 }
 
 SEC("?raw_tp")
-__failure __msg("Unreleased reference id=2")
+__failure __msg("Unreleased reference id=4")
 int ringbuf_missing_release2(void *ctx)
 {
 	struct bpf_dynptr ptr1, ptr2;