[bpf-next,v1,2/8] libbpf: tie struct_ops programs to kernel BTF ids, not to local ids

Message ID 20240227204556.17524-3-eddyz87@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series libbpf: type suffixes and autocreate flag for struct_ops maps

Commit Message

Eduard Zingerman Feb. 27, 2024, 8:45 p.m. UTC
Enforce the following existing limitation on struct_ops programs based
on kernel BTF id instead of program-local BTF id:

    struct_ops BPF prog can be re-used between multiple .struct_ops &
    .struct_ops.link as long as it's the same struct_ops struct
    definition and the same function pointer field

This allows reusing same BPF program for versioned struct_ops map
definitions, e.g.:

    SEC("struct_ops/test")
    int BPF_PROG(foo) { ... }

    struct some_ops___v1 { int (*test)(void); };
    struct some_ops___v2 { int (*test)(void); };

    SEC(".struct_ops.link") struct some_ops___v1 a = { .test = foo }
    SEC(".struct_ops.link") struct some_ops___v2 b = { .test = foo }

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 tools/lib/bpf/libbpf.c | 44 ++++++++++++++++++++----------------------
 1 file changed, 21 insertions(+), 23 deletions(-)
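
For context, a fuller BPF-side sketch of the pattern shown in the commit message above (a minimal, hypothetical example: 'some_ops' is not a real kernel struct_ops type, and vmlinux.h is assumed to be generated by bpftool):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    SEC("struct_ops/test")
    int BPF_PROG(foo)
    {
    	return 0;
    }

    /* two local flavors of the same kernel struct_ops type; libbpf strips
     * the ___v1/___v2 suffix when matching against kernel BTF
     */
    struct some_ops___v1 {
    	int (*test)(void);
    };

    struct some_ops___v2 {
    	int (*test)(void);
    };

    /* both maps reference the same program; after this patch that is
     * accepted because both uses resolve to the same kernel BTF id and
     * the same function pointer field
     */
    SEC(".struct_ops.link")
    struct some_ops___v1 a = { .test = (void *)foo };

    SEC(".struct_ops.link")
    struct some_ops___v2 b = { .test = (void *)foo };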

Comments

Martin KaFai Lau Feb. 28, 2024, 7:41 a.m. UTC | #1
On 2/27/24 12:45 PM, Eduard Zingerman wrote:
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index abe663927013..c239b75d5816 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -1134,8 +1134,27 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
>   
>   			if (mod_btf)
>   				prog->attach_btf_obj_fd = mod_btf->fd;
> -			prog->attach_btf_id = kern_type_id;
> -			prog->expected_attach_type = kern_member_idx;
> +
> +			/* if we haven't yet processed this BPF program, record proper
> +			 * attach_btf_id and member_idx
> +			 */
> +			if (!prog->attach_btf_id) {
> +				prog->attach_btf_id = kern_type_id;
> +				prog->expected_attach_type = kern_member_idx;
> +			}
> +
> +			/* struct_ops BPF prog can be re-used between multiple
> +			 * .struct_ops & .struct_ops.link as long as it's the
> +			 * same struct_ops struct definition and the same
> +			 * function pointer field
> +			 */
> +			if (prog->attach_btf_id != kern_type_id ||
> +			    prog->expected_attach_type != kern_member_idx) {
> +				pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",

The patch lgtm. A nit is s/reloc/init_kern/.

> +					map->name, prog->name, prog->sec_name, prog->type,
> +					prog->attach_btf_id, prog->expected_attach_type, mname);
> +				return -EINVAL;
> +			}
>   
>   			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
David Vernet Feb. 28, 2024, 5:23 p.m. UTC | #2
On Tue, Feb 27, 2024 at 10:45:50PM +0200, Eduard Zingerman wrote:
> Enforce the following existing limitation on struct_ops programs based
> on kernel BTF id instead of program-local BTF id:
> 
>     struct_ops BPF prog can be re-used between multiple .struct_ops &
>     .struct_ops.link as long as it's the same struct_ops struct
>     definition and the same function pointer field

Am I correct in understanding the code that the prog also has to be at the same
offset in the new type? So if we have for example:

SEC("struct_ops/test")
int BPF_PROG(foo) { ... }

struct some_ops___v1 {
	int (*test)(void);
};

struct some_ops___v2 {
	int (*init)(void);
	int (*test)(void);
};

Then this wouldn't work? If so, would it be possible for libbpf to do something
like invisibly duplicate the prog and create a separate one for each struct_ops
map where it's encountered? It feels like a rather awkward restriction to
impose given that the idea behind the feature is to enable loading one of
multiple possible definitions of a struct_ops type.

> 
> This allows reusing same BPF program for versioned struct_ops map
> definitions, e.g.:
> 
>     SEC("struct_ops/test")
>     int BPF_PROG(foo) { ... }
> 
>     struct some_ops___v1 { int (*test)(void); };
>     struct some_ops___v2 { int (*test)(void); };
> 
>     SEC(".struct_ops.link") struct some_ops___v1 a = { .test = foo }
>     SEC(".struct_ops.link") struct some_ops___v2 b = { .test = foo }
> 
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> ---
>  tools/lib/bpf/libbpf.c | 44 ++++++++++++++++++++----------------------
>  1 file changed, 21 insertions(+), 23 deletions(-)
> 
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index abe663927013..c239b75d5816 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -1134,8 +1134,27 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
>  
>  			if (mod_btf)
>  				prog->attach_btf_obj_fd = mod_btf->fd;
> -			prog->attach_btf_id = kern_type_id;
> -			prog->expected_attach_type = kern_member_idx;
> +
> +			/* if we haven't yet processed this BPF program, record proper
> +			 * attach_btf_id and member_idx
> +			 */
> +			if (!prog->attach_btf_id) {
> +				prog->attach_btf_id = kern_type_id;
> +				prog->expected_attach_type = kern_member_idx;
> +			}
> +
> +			/* struct_ops BPF prog can be re-used between multiple
> +			 * .struct_ops & .struct_ops.link as long as it's the
> +			 * same struct_ops struct definition and the same
> +			 * function pointer field
> +			 */
> +			if (prog->attach_btf_id != kern_type_id ||
> +			    prog->expected_attach_type != kern_member_idx) {
> +				pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
> +					map->name, prog->name, prog->sec_name, prog->type,
> +					prog->attach_btf_id, prog->expected_attach_type, mname);
> +				return -EINVAL;
> +			}
>  
>  			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
>  
> @@ -9409,27 +9428,6 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
>  			return -EINVAL;
>  		}
>  
> -		/* if we haven't yet processed this BPF program, record proper
> -		 * attach_btf_id and member_idx
> -		 */
> -		if (!prog->attach_btf_id) {
> -			prog->attach_btf_id = st_ops->type_id;
> -			prog->expected_attach_type = member_idx;
> -		}
> -
> -		/* struct_ops BPF prog can be re-used between multiple
> -		 * .struct_ops & .struct_ops.link as long as it's the
> -		 * same struct_ops struct definition and the same
> -		 * function pointer field
> -		 */
> -		if (prog->attach_btf_id != st_ops->type_id ||
> -		    prog->expected_attach_type != member_idx) {
> -			pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
> -				map->name, prog->name, prog->sec_name, prog->type,
> -				prog->attach_btf_id, prog->expected_attach_type, name);
> -			return -EINVAL;
> -		}
> -
>  		st_ops->progs[member_idx] = prog;
>  	}
>  
> -- 
> 2.43.0
>
Eduard Zingerman Feb. 28, 2024, 5:40 p.m. UTC | #3
On Wed, 2024-02-28 at 11:23 -0600, David Vernet wrote:
> On Tue, Feb 27, 2024 at 10:45:50PM +0200, Eduard Zingerman wrote:
> > Enforce the following existing limitation on struct_ops programs based
> > on kernel BTF id instead of program-local BTF id:
> > 
> >     struct_ops BPF prog can be re-used between multiple .struct_ops &
> >     .struct_ops.link as long as it's the same struct_ops struct
> >     definition and the same function pointer field
> 
> Am I correct in understanding the code that the prog also has to be at the same
> offset in the new type?

Yes, but after this patch it would be offset in current kernel BTF type,
not local BTF type.

> So if we have for example:
> 
> SEC("struct_ops/test")
> int BPF_PROG(foo) { ... }
> 
> struct some_ops___v1 {
> 	int (*test)(void);
> };
> 
> struct some_ops___v2 {
> 	int (*init)(void);
> 	int (*test)(void);
> };

From pov of kernel BTF there is only one 'struct some_ops'.
 
> Then this wouldn't work? If so, would it be possible for libbpf to do something
> like invisibly duplicate the prog and create a separate one for each struct_ops
> map where it's encountered? It feels like a rather awkward restriction to
> impose given that the idea behind the feature is to enable loading one of
> multiple possible definitions of a struct_ops type. 

In combination with the next patch, the idea is to not assign offsets
in struct_ops maps which have autocreate == false.

If the object corresponding to the program above were opened and
autocreate were disabled for either some_ops___v1 or some_ops___v2
before load, the program 'test' would get its offset entry from only
one map. Thus no program duplication would be necessary.

For example, see test case in patch #6:

    struct bpf_testmod_ops___v1 {
    	int (*test_1)(void);
    };

    struct bpf_testmod_ops___v2 {
    	int (*test_1)(void);
    	int (*does_not_exist)(void);
    };

    SEC(".struct_ops.link")
    struct bpf_testmod_ops___v1 testmod_1 = {
    	.test_1 = (void *)test_1
    };

    SEC(".struct_ops.link")
    struct bpf_testmod_ops___v2 testmod_2 = {
    	.test_1 = (void *)test_1,
    	.does_not_exist = (void *)test_2
    };


static void can_load_partial_object(void)
{
	...
	skel = struct_ops_autocreate__open_opts(&opts);
	bpf_program__set_autoload(skel->progs.test_2, false);
	bpf_map__set_autocreate(skel->maps.testmod_2, false);
	struct_ops_autocreate__load(skel);
        ...
}

This should handle your example as well.
Do you find this sufficient or would you still like to have implicit
program duplication logic?
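
For reference, the same approach with plain bpf_object APIs instead of the generated skeleton (a minimal sketch: the object file path is a placeholder, error handling is simplified, and the map/program names are taken from the test above):

#include <errno.h>
#include <bpf/libbpf.h>

int load_v1_only(const char *obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open_file(obj_path, NULL);
	if (!obj)
		return -errno;

	/* keep only the ___v1 variant: disable the ___v2 map and the
	 * program that only it references (test_2), so test_1 gets its
	 * attach_btf_id and kernel offset entry from testmod_1 alone
	 */
	map = bpf_object__find_map_by_name(obj, "testmod_2");
	if (map)
		bpf_map__set_autocreate(map, false);

	prog = bpf_object__find_program_by_name(obj, "test_2");
	if (prog)
		bpf_program__set_autoload(prog, false);

	err = bpf_object__load(obj);
	bpf_object__close(obj);
	return err;
}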
David Vernet Feb. 28, 2024, 5:50 p.m. UTC | #4
On Wed, Feb 28, 2024 at 07:40:33PM +0200, Eduard Zingerman wrote:
> On Wed, 2024-02-28 at 11:23 -0600, David Vernet wrote:
> > On Tue, Feb 27, 2024 at 10:45:50PM +0200, Eduard Zingerman wrote:
> > > Enforce the following existing limitation on struct_ops programs based
> > > on kernel BTF id instead of program-local BTF id:
> > > 
> > >     struct_ops BPF prog can be re-used between multiple .struct_ops &
> > >     .struct_ops.link as long as it's the same struct_ops struct
> > >     definition and the same function pointer field
> > 
> > Am I correct in understanding the code that the prog also has to be at the same
> > offset in the new type?
> 
> Yes, but after this patch it would be offset in current kernel BTF type,
> not local BTF type.

Yes, indeed. I didn't mean to imply that your patch was changing that. I was
asking more generally -- sorry for the confusion.

> > So if we have for example:
> > 
> > SEC("struct_ops/test")
> > int BPF_PROG(foo) { ... }
> > 
> > struct some_ops___v1 {
> > 	int (*test)(void);
> > };
> > 
> > struct some_ops___v2 {
> > 	int (*init)(void);
> > 	int (*test)(void);
> > };
> 
> From pov of kernel BTF there is only one 'struct some_ops'.

Ack

> > Then this wouldn't work? If so, would it be possible for libbpf to do something
> > like invisibly duplicate the prog and create a separate one for each struct_ops
> > map where it's encountered? It feels like a rather awkward restriction to
> > impose given that the idea behind the feature is to enable loading one of
> > multiple possible definitions of a struct_ops type. 
> 
> In combination with the next patch, the idea is to not assign offsets
> in struct_ops maps which have autocreate == false.
> 
> If the object corresponding to the program above were opened and
> autocreate were disabled for either some_ops___v1 or some_ops___v2
> before load, the program 'test' would get its offset entry from only
> one map. Thus no program duplication would be necessary.
> 
> For example, see test case in patch #6:
> 
>     struct bpf_testmod_ops___v1 {
>     	int (*test_1)(void);
>     };
> 
>     struct bpf_testmod_ops___v2 {
>     	int (*test_1)(void);
>     	int (*does_not_exist)(void);
>     };
> 
>     SEC(".struct_ops.link")
>     struct bpf_testmod_ops___v1 testmod_1 = {
>     	.test_1 = (void *)test_1
>     };
> 
>     SEC(".struct_ops.link")
>     struct bpf_testmod_ops___v2 testmod_2 = {
>     	.test_1 = (void *)test_1,
>     	.does_not_exist = (void *)test_2
>     };
> 
> 
> static void can_load_partial_object(void)
> {
> 	...
> 	skel = struct_ops_autocreate__open_opts(&opts);
> 	bpf_program__set_autoload(skel->progs.test_2, false);
> 	bpf_map__set_autocreate(skel->maps.testmod_2, false);
> 	struct_ops_autocreate__load(skel);
>         ...
> }
> 
> This should handle your example as well.
> Do you find this sufficient or would you still like to have implicit
> program duplication logic?

It's definitely fine for now, but IMO it's something to keep in mind for the
future as a usability improvement. Ideally libbpf could internally handle just
creating and loading the type that's actually present on the system, and handle
applying the prog to the correct map, etc., on the caller's behalf. Given that
there's only ever a single instance of a struct_ops type on the system, this
seems like a reasonable feature for the library to provide.

Note that this doesn't necessarily require duplicating the prog either, if
libbpf can instead _deduplicate_ the struct_ops maps to only create and load
the one that matches the type on the system.

Not a blocker by any means, but possibly a nice to have down the road.
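
A rough sketch of what a caller can do today to pick the matching variant before load: probe BTF for the newer member and flip autocreate accordingly (this is an assumption about caller-side usage, not part of the patch; names follow the test module example above):

#include <stdbool.h>
#include <string.h>
#include <bpf/btf.h>

static bool struct_ops_has_member(const struct btf *btf, const char *type_name,
				  const char *member_name)
{
	const struct btf_type *t;
	const struct btf_member *m;
	int id, i;

	/* look up the struct_ops struct by name in the given BTF */
	id = btf__find_by_name_kind(btf, type_name, BTF_KIND_STRUCT);
	if (id <= 0)
		return false;

	t = btf__type_by_id(btf, id);
	m = btf_members(t);
	for (i = 0; i < btf_vlen(t); i++, m++) {
		const char *name = btf__name_by_offset(btf, m->name_off);

		if (name && strcmp(name, member_name) == 0)
			return true;
	}
	return false;
}

/* e.g., with the test module from patch #6 (module BTF, so load it on top
 * of vmlinux BTF first):
 *
 *     struct btf *vmlinux_btf = btf__load_vmlinux_btf();
 *     struct btf *mod_btf = btf__load_module_btf("bpf_testmod", vmlinux_btf);
 *     bool v2 = mod_btf &&
 *               struct_ops_has_member(mod_btf, "bpf_testmod_ops", "does_not_exist");
 *
 *     bpf_map__set_autocreate(skel->maps.testmod_2, v2);
 *     bpf_map__set_autocreate(skel->maps.testmod_1, !v2);
 */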
Andrii Nakryiko Feb. 28, 2024, 11:28 p.m. UTC | #5
On Tue, Feb 27, 2024 at 12:46 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> Enforce the following existing limitation on struct_ops programs based
> on kernel BTF id instead of program-local BTF id:
>
>     struct_ops BPF prog can be re-used between multiple .struct_ops &
>     .struct_ops.link as long as it's the same struct_ops struct
>     definition and the same function pointer field
>
> This allows reusing same BPF program for versioned struct_ops map
> definitions, e.g.:
>
>     SEC("struct_ops/test")
>     int BPF_PROG(foo) { ... }
>
>     struct some_ops___v1 { int (*test)(void); };
>     struct some_ops___v2 { int (*test)(void); };
>
>     SEC(".struct_ops.link") struct some_ops___v1 a = { .test = foo }
>     SEC(".struct_ops.link") struct some_ops___v2 b = { .test = foo }
>
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> ---
>  tools/lib/bpf/libbpf.c | 44 ++++++++++++++++++++----------------------
>  1 file changed, 21 insertions(+), 23 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index abe663927013..c239b75d5816 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -1134,8 +1134,27 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
>
>                         if (mod_btf)
>                                 prog->attach_btf_obj_fd = mod_btf->fd;
> -                       prog->attach_btf_id = kern_type_id;
> -                       prog->expected_attach_type = kern_member_idx;
> +
> +                       /* if we haven't yet processed this BPF program, record proper
> +                        * attach_btf_id and member_idx
> +                        */
> +                       if (!prog->attach_btf_id) {
> +                               prog->attach_btf_id = kern_type_id;
> +                               prog->expected_attach_type = kern_member_idx;
> +                       }
> +
> +                       /* struct_ops BPF prog can be re-used between multiple
> +                        * .struct_ops & .struct_ops.link as long as it's the
> +                        * same struct_ops struct definition and the same
> +                        * function pointer field
> +                        */
> +                       if (prog->attach_btf_id != kern_type_id ||
> +                           prog->expected_attach_type != kern_member_idx) {
> +                               pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",

Martin already pointed out s/reloc/init_kern/, but I also find "cannot
use prog" a bit too unactionable. Maybe "invalid reuse of prog"?
"reuse" is the key here to point out that this program is used at
least twice, and that in some incompatible way?

> +                                       map->name, prog->name, prog->sec_name, prog->type,
> +                                       prog->attach_btf_id, prog->expected_attach_type, mname);
> +                               return -EINVAL;
> +                       }
>
>                         st_ops->kern_func_off[i] = kern_data_off + kern_moff;
>
> @@ -9409,27 +9428,6 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
>                         return -EINVAL;
>                 }
>
> -               /* if we haven't yet processed this BPF program, record proper
> -                * attach_btf_id and member_idx
> -                */
> -               if (!prog->attach_btf_id) {
> -                       prog->attach_btf_id = st_ops->type_id;
> -                       prog->expected_attach_type = member_idx;
> -               }
> -
> -               /* struct_ops BPF prog can be re-used between multiple
> -                * .struct_ops & .struct_ops.link as long as it's the
> -                * same struct_ops struct definition and the same
> -                * function pointer field
> -                */
> -               if (prog->attach_btf_id != st_ops->type_id ||
> -                   prog->expected_attach_type != member_idx) {
> -                       pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
> -                               map->name, prog->name, prog->sec_name, prog->type,
> -                               prog->attach_btf_id, prog->expected_attach_type, name);
> -                       return -EINVAL;
> -               }
> -
>                 st_ops->progs[member_idx] = prog;
>         }
>
> --
> 2.43.0
>
Eduard Zingerman Feb. 28, 2024, 11:31 p.m. UTC | #6
On Wed, 2024-02-28 at 15:28 -0800, Andrii Nakryiko wrote:
[...]

> > @@ -1134,8 +1134,27 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
> > 
> >                         if (mod_btf)
> >                                 prog->attach_btf_obj_fd = mod_btf->fd;
> > -                       prog->attach_btf_id = kern_type_id;
> > -                       prog->expected_attach_type = kern_member_idx;
> > +
> > +                       /* if we haven't yet processed this BPF program, record proper
> > +                        * attach_btf_id and member_idx
> > +                        */
> > +                       if (!prog->attach_btf_id) {
> > +                               prog->attach_btf_id = kern_type_id;
> > +                               prog->expected_attach_type = kern_member_idx;
> > +                       }
> > +
> > +                       /* struct_ops BPF prog can be re-used between multiple
> > +                        * .struct_ops & .struct_ops.link as long as it's the
> > +                        * same struct_ops struct definition and the same
> > +                        * function pointer field
> > +                        */
> > +                       if (prog->attach_btf_id != kern_type_id ||
> > +                           prog->expected_attach_type != kern_member_idx) {
> > +                               pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
> 
> Martin already pointed out s/reloc/init_kern/, but I also find "cannot
> use prog" a bit too unactionable. Maybe "invalid reuse of prog"?
> "reuse" is the key here to point out that this program is used at
> least twice, and that in some incompatible way?

Ok, I'll change the wording.
And split the line despite the coding standards.
Andrii Nakryiko Feb. 28, 2024, 11:34 p.m. UTC | #7
On Wed, Feb 28, 2024 at 3:31 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Wed, 2024-02-28 at 15:28 -0800, Andrii Nakryiko wrote:
> [...]
>
> > > @@ -1134,8 +1134,27 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
> > >
> > >                         if (mod_btf)
> > >                                 prog->attach_btf_obj_fd = mod_btf->fd;
> > > -                       prog->attach_btf_id = kern_type_id;
> > > -                       prog->expected_attach_type = kern_member_idx;
> > > +
> > > +                       /* if we haven't yet processed this BPF program, record proper
> > > +                        * attach_btf_id and member_idx
> > > +                        */
> > > +                       if (!prog->attach_btf_id) {
> > > +                               prog->attach_btf_id = kern_type_id;
> > > +                               prog->expected_attach_type = kern_member_idx;
> > > +                       }
> > > +
> > > +                       /* struct_ops BPF prog can be re-used between multiple
> > > +                        * .struct_ops & .struct_ops.link as long as it's the
> > > +                        * same struct_ops struct definition and the same
> > > +                        * function pointer field
> > > +                        */
> > > +                       if (prog->attach_btf_id != kern_type_id ||
> > > +                           prog->expected_attach_type != kern_member_idx) {
> > > +                               pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
> >
> > Martin already pointed out s/reloc/init_kern/, but I also find "cannot
> > use prog" a bit too unactionable. Maybe "invalid reuse of prog"?
> > "reuse" is the key here to point out that this program is used at
> > least twice, and that in some incompatible way?
>
> Ok, I'll change the wording.
> And split the line despite the coding standards.

please don't split format strings, it makes grepping harder

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index abe663927013..c239b75d5816 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1134,8 +1134,27 @@  static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
 
 			if (mod_btf)
 				prog->attach_btf_obj_fd = mod_btf->fd;
-			prog->attach_btf_id = kern_type_id;
-			prog->expected_attach_type = kern_member_idx;
+
+			/* if we haven't yet processed this BPF program, record proper
+			 * attach_btf_id and member_idx
+			 */
+			if (!prog->attach_btf_id) {
+				prog->attach_btf_id = kern_type_id;
+				prog->expected_attach_type = kern_member_idx;
+			}
+
+			/* struct_ops BPF prog can be re-used between multiple
+			 * .struct_ops & .struct_ops.link as long as it's the
+			 * same struct_ops struct definition and the same
+			 * function pointer field
+			 */
+			if (prog->attach_btf_id != kern_type_id ||
+			    prog->expected_attach_type != kern_member_idx) {
+				pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
+					map->name, prog->name, prog->sec_name, prog->type,
+					prog->attach_btf_id, prog->expected_attach_type, mname);
+				return -EINVAL;
+			}
 
 			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
 
@@ -9409,27 +9428,6 @@  static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 			return -EINVAL;
 		}
 
-		/* if we haven't yet processed this BPF program, record proper
-		 * attach_btf_id and member_idx
-		 */
-		if (!prog->attach_btf_id) {
-			prog->attach_btf_id = st_ops->type_id;
-			prog->expected_attach_type = member_idx;
-		}
-
-		/* struct_ops BPF prog can be re-used between multiple
-		 * .struct_ops & .struct_ops.link as long as it's the
-		 * same struct_ops struct definition and the same
-		 * function pointer field
-		 */
-		if (prog->attach_btf_id != st_ops->type_id ||
-		    prog->expected_attach_type != member_idx) {
-			pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
-				map->name, prog->name, prog->sec_name, prog->type,
-				prog->attach_btf_id, prog->expected_attach_type, name);
-			return -EINVAL;
-		}
-
 		st_ops->progs[member_idx] = prog;
 	}
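
For contrast, a hypothetical object that this check still rejects after the switch to kernel BTF ids: the same program is wired to two different function pointer fields, so the recorded attach_btf_id/expected_attach_type cannot match on the second use and load fails with -EINVAL (struct and field names are made up):

    SEC("struct_ops/test")
    int BPF_PROG(foo) { return 0; }

    struct some_ops {
    	int (*test)(void);
    	int (*other)(void);
    };

    SEC(".struct_ops.link")
    struct some_ops bad = {
    	.test  = (void *)foo,
    	.other = (void *)foo,	/* same prog, different member index */
    };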