diff mbox series

[v3,bpf-next,02/11] bpf: Improve bpf_reg_state space usage for non-owning ref lock

Message ID 20230131180016.3368305-3-davemarchevsky@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series BPF rbtree next-gen datastructure | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/apply fail Patch does not apply to bpf-next
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 fail Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix

Commit Message

Dave Marchevsky Jan. 31, 2023, 6 p.m. UTC
This patch eliminates the extra bpf_reg_state memory usage added by the
previous patch, which kept a copy of the lock identity in reg state for
non-owning refs.

Instead of copying the lock identity around, this patch changes the
non_owning_ref_lock field to a bool, taking advantage of the
following:

  * There can currently only be one active lock at a time
  * non-owning refs are only valid in the critical section

So if a verifier_state has an active_lock, any non-owning ref must have
been obtained under that lock, and any non-owning ref not obtained under
that lock must have been invalidated previously. Therefore, if a
non-owning ref is associated with a lock, it is the active_lock of the
current state. So we can keep a bool "are we associated with the
active_lock of the current state" instead of copying the lock identity around.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/linux/bpf_verifier.h |  2 +-
 kernel/bpf/verifier.c        | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)

Comments

Dave Marchevsky Jan. 31, 2023, 6:25 p.m. UTC | #1
On 1/31/23 1:00 PM, Dave Marchevsky wrote:
> This patch eliminates extra bpf_reg_state memory usage added due to
> previous patch keeping a copy of lock identity in reg state for
> non-owning refs.
> 
> Instead of copying lock identity around, this patch changes
> non_owning_ref_lock field to be a bool, taking advantage of the
> following:
> 
>   * There can currently only be one active lock at a time
>   * non-owning refs are only valid in the critical section
> 
> So if a verifier_state has an active_lock, any non-owning ref must've
> been obtained under that lock, and any non-owning ref not obtained under
> that lock must have been invalidated previously. Therefore if a
> non-owning ref is associated with a lock, it's the active_lock of the
> current state. So we can keep a bool "are we associated with active_lock
> of current state" instead of copying lock identity around.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>  include/linux/bpf_verifier.h |  2 +-
>  kernel/bpf/verifier.c        | 20 ++++++++++----------
>  2 files changed, 11 insertions(+), 11 deletions(-)
> 
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index 1c6bbde40705..baeb5afb0b81 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -84,7 +84,7 @@ struct bpf_reg_state {
>  		struct {
>  			struct btf *btf;
>  			u32 btf_id;
> -			struct bpf_active_lock non_owning_ref_lock;
> +			bool non_owning_ref_lock;
>  		};
>  
>  		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

Pahole output after this change:

        union {
                int                range;                /*     8     4 */
                struct {
                        struct bpf_map * map_ptr;        /*     8     8 */
                        u32        map_uid;              /*    16     4 */
                };                                       /*     8    16 */
                struct {
                        struct btf * btf;                /*     8     8 */
                        u32        btf_id;               /*    16     4 */
                        bool       non_owning_ref_lock;  /*    20     1 */
                };                                       /*     8    16 */
                u32                mem_size;             /*     8     4 */
                struct {
                        enum bpf_dynptr_type type;       /*     8     4 */
                        bool       first_slot;           /*    12     1 */
                } dynptr;                                /*     8     8 */
                struct {
                        long unsigned int raw1;          /*     8     8 */
                        long unsigned int raw2;          /*    16     8 */
                } raw;                                   /*     8    16 */
                u32                subprogno;            /*     8     4 */
        };                                               /*     8    16 */

The PTR_TO_BTF_ID union entry was taking 24 bytes after the previous
commit; now it's down to 16, back to the same size as before that commit.
Alexei Starovoitov Feb. 1, 2023, 9:36 p.m. UTC | #2
On Tue, Jan 31, 2023 at 10:00 AM Dave Marchevsky <davemarchevsky@fb.com> wrote:
>  static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
>                                        struct bpf_active_lock *lock)
>  {
> +       struct bpf_active_lock *cur_state_lock;
>         struct bpf_func_state *unused;
>         struct bpf_reg_state *reg;
>
> +       cur_state_lock = &env->cur_state->active_lock;
>         bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
> -               if (reg->non_owning_ref_lock.ptr &&
> -                   reg->non_owning_ref_lock.ptr == lock->ptr &&
> -                   reg->non_owning_ref_lock.id == lock->id)
> +               if (reg->non_owning_ref_lock &&
> +                   cur_state_lock->ptr == lock->ptr &&
> +                   cur_state_lock->id == lock->id)

invalidate_non_owning_refs() is called with &cur_state,
so the last two checks are redundant, but I suspect they hide
the issue with the first check.
Just reg->non_owning_ref_lock is ambiguous.
It needs base_type(reg->type) == PTR_TO_BTF_ID first.
diff mbox series

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1c6bbde40705..baeb5afb0b81 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -84,7 +84,7 @@  struct bpf_reg_state {
 		struct {
 			struct btf *btf;
 			u32 btf_id;
-			struct bpf_active_lock non_owning_ref_lock;
+			bool non_owning_ref_lock;
 		};
 
 		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4b1883ffcf21..ed816e824928 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -935,9 +935,8 @@  static void print_verifier_state(struct bpf_verifier_env *env,
 				verbose_a("id=%d", reg->id);
 			if (reg->ref_obj_id)
 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
-			if (reg->non_owning_ref_lock.ptr)
-				verbose_a("non_own_id=(%p,%d)", reg->non_owning_ref_lock.ptr,
-					  reg->non_owning_ref_lock.id);
+			if (reg->non_owning_ref_lock)
+				verbose(env, "non_own_ref,");
 			if (t != SCALAR_VALUE)
 				verbose_a("off=%d", reg->off);
 			if (type_is_pkt_pointer(t))
@@ -4834,7 +4833,7 @@  static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		}
 
 		if (type_is_alloc(reg->type) && !reg->ref_obj_id &&
-		    !reg->non_owning_ref_lock.ptr) {
+		    !reg->non_owning_ref_lock) {
 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
 			return -EFAULT;
 		}
@@ -7085,13 +7084,15 @@  static int release_reference(struct bpf_verifier_env *env,
 static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
 				       struct bpf_active_lock *lock)
 {
+	struct bpf_active_lock *cur_state_lock;
 	struct bpf_func_state *unused;
 	struct bpf_reg_state *reg;
 
+	cur_state_lock = &env->cur_state->active_lock;
 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
-		if (reg->non_owning_ref_lock.ptr &&
-		    reg->non_owning_ref_lock.ptr == lock->ptr &&
-		    reg->non_owning_ref_lock.id == lock->id)
+		if (reg->non_owning_ref_lock &&
+		    cur_state_lock->ptr == lock->ptr &&
+		    cur_state_lock->id == lock->id)
 			__mark_reg_unknown(env, reg);
 	}));
 }
@@ -8718,13 +8719,12 @@  static int ref_set_non_owning_lock(struct bpf_verifier_env *env, struct bpf_reg_
 		return -EFAULT;
 	}
 
-	if (reg->non_owning_ref_lock.ptr) {
+	if (reg->non_owning_ref_lock) {
 		verbose(env, "verifier internal error: non_owning_ref_lock already set\n");
 		return -EFAULT;
 	}
 
-	reg->non_owning_ref_lock.id = state->active_lock.id;
-	reg->non_owning_ref_lock.ptr = state->active_lock.ptr;
+	reg->non_owning_ref_lock = true;
 	return 0;
 }