[PATCHv3,bpf-next,7/9] selftests/bpf: Allow to use kfunc from testmod.ko in test_verifier

Message ID: 20230203162336.608323-8-jolsa@kernel.org (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series bpf: Move kernel test kfuncs into bpf_testmod

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: linux-kselftest@vger.kernel.org shuah@kernel.org song@kernel.org martin.lau@linux.dev mykolal@fb.com kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: No space is necessary after a cast; WARNING: line length of 82 exceeds 80 columns; WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 85 exceeds 80 columns; WARNING: line length of 88 exceeds 80 columns; WARNING: line length of 90 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline fail Was 0 now: 1
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-36 fail Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc

Commit Message

Jiri Olsa Feb. 3, 2023, 4:23 p.m. UTC
Currently test_verifier allows a test to specify a kfunc symbol,
which is then looked up in the kernel BTF.

Add the possibility to also look the kfunc up in the bpf_testmod
module's BTF when it is not found in the kernel BTF.

To find the bpf_testmod BTF we need to temporarily regain the
SYS_ADMIN capability.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 tools/testing/selftests/bpf/test_verifier.c | 161 +++++++++++++++++---
 1 file changed, 139 insertions(+), 22 deletions(-)
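
For context, a test_verifier test that exercises this path declares the
kfunc via the fixup_kfunc_btf_id array, roughly like the sketch below
(the test name, kfunc name, program type and result are only
illustrative):

	{
		"module kfunc call (illustrative)",
		.insns = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		/* insn_idx 0 is the kfunc call above; its imm field gets
		 * patched with the BTF ID found in the kernel BTF or, with
		 * this patch, in the bpf_testmod BTF.
		 */
		.fixup_kfunc_btf_id = {
			{ "bpf_kfunc_call_test1", 0 },
		},
	},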

Comments

David Vernet Feb. 7, 2023, 3:34 p.m. UTC | #1
On Fri, Feb 03, 2023 at 05:23:34PM +0100, Jiri Olsa wrote:
> Currently the test_verifier allows test to specify kfunc symbol
> and search for it in the kernel BTF.
> 
> Adding the possibility to search for kfunc also in bpf_testmod
> module when it's not found in kernel BTF.
> 
> To find bpf_testmod btf we need to get back SYS_ADMIN cap.

This observation and any subsequent discussion is certainly outside the
scope of your patch set, but it feels like a bit of a weird /
inconsistent UX to force users to have SYS_ADMIN cap for loading kfuncs
from modules, but not from vmlinux BTF.

I realize that you need to have SYS_ADMIN cap for BPF_PROG_GET_FD_BY_ID,
BPF_MAP_GET_FD_BY_ID, etc, so the consistency makes sense there, but it
would be nice if we could eventually make the UX consistent for programs
linking against module kfuncs, because I don't really see the difference
in terms of permissions from the user's perspective.

> 
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>

LGTM in general -- just left one comment below.

Acked-by: David Vernet <void@manifault.com>

> ---
>  tools/testing/selftests/bpf/test_verifier.c | 161 +++++++++++++++++---
>  1 file changed, 139 insertions(+), 22 deletions(-)
> 
> diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
> index 14f11f2dfbce..0a570195be37 100644
> --- a/tools/testing/selftests/bpf/test_verifier.c
> +++ b/tools/testing/selftests/bpf/test_verifier.c
> @@ -879,8 +879,140 @@ static int create_map_kptr(void)
>  	return fd;
>  }
>  
> +static void set_root(bool set)
> +{
> +	__u64 caps;
> +
> +	if (set) {
> +		if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
> +			perror("cap_disable_effective(CAP_SYS_ADMIN)");
> +	} else {
> +		if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
> +			perror("cap_disable_effective(CAP_SYS_ADMIN)");
> +	}
> +}
> +
> +static inline __u64 ptr_to_u64(const void *ptr)
> +{
> +	return (__u64) (unsigned long) ptr;

Small nit / suggestion -- IMO this is slightly preferable just to keep
it a bit more in-line with the C-standard:

return (uintptr_t)ptr;

The standard of course doesn't dictate that you can do
ptr -> uintptr_t -> __u64 -> uintptr_t -> ptr, but it at least does dictate that you can do
ptr -> uintptr_t -> ptr, whereas it does not say the same for
ptr -> unsigned long -> ptr

Also, I don't think the 'inline' keyword is necessary. The compiler will
probably figure this out on its own.
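
Put together, that would be roughly (just a sketch; assumes uintptr_t
from <stdint.h> is in scope):

static __u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}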

> +}
> +
> +static struct btf *btf__load_testmod_btf(struct btf *vmlinux)

Would be nice if some of this code could be shared from libbpf at some
point, but ok, a cleanup for another time.

> +{
> +	struct bpf_btf_info info;
> +	__u32 len = sizeof(info);
> +	struct btf *btf = NULL;
> +	char name[64];
> +	__u32 id = 0;
> +	int err, fd;
> +
> +	/* Iterate all loaded BTF objects and find bpf_testmod,
> +	 * we need SYS_ADMIN cap for that.
> +	 */
> +	set_root(true);
> +
> +	while (true) {
> +		err = bpf_btf_get_next_id(id, &id);
> +		if (err) {
> +			if (errno == ENOENT)
> +				break;
> +			perror("bpf_btf_get_next_id failed");
> +			break;
> +		}
> +
> +		fd = bpf_btf_get_fd_by_id(id);
> +		if (fd < 0) {
> +			if (errno == ENOENT)
> +				continue;
> +			perror("bpf_btf_get_fd_by_id failed");
> +			break;
> +		}
> +
> +		memset(&info, 0, sizeof(info));
> +		info.name_len = sizeof(name);
> +		info.name = ptr_to_u64(name);
> +		len = sizeof(info);
> +
> +		err = bpf_obj_get_info_by_fd(fd, &info, &len);
> +		if (err) {
> +			close(fd);
> +			perror("bpf_obj_get_info_by_fd failed");
> +			break;
> +		}
> +
> +		if (strcmp("bpf_testmod", name)) {
> +			close(fd);
> +			continue;
> +		}
> +
> +		btf = btf__load_from_kernel_by_id_split(id, vmlinux);
> +		if (!btf) {
> +			close(fd);
> +			break;
> +		}
> +
> +		/* We need the fd to stay open so it can be used in fd_array.
> +		 * The final cleanup call to btf__free will free btf object
> +		 * and close the file descriptor.
> +		 */
> +		btf__set_fd(btf, fd);
> +		break;
> +	}
> +
> +	set_root(false);
> +	return btf;
> +}
> +
> +static struct btf *testmod_btf;
> +static struct btf *vmlinux_btf;
> +
> +static void kfuncs_cleanup(void)
> +{
> +	btf__free(testmod_btf);
> +	btf__free(vmlinux_btf);
> +}
> +
> +static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
> +			      struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
> +{
> +	/* Patch in kfunc BTF IDs */
> +	while (fixup_kfunc_btf_id->kfunc) {
> +		int btf_id = 0;
> +
> +		/* try to find kfunc in kernel BTF */
> +		vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
> +		if (vmlinux_btf) {
> +			btf_id = btf__find_by_name_kind(vmlinux_btf,
> +							fixup_kfunc_btf_id->kfunc,
> +							BTF_KIND_FUNC);
> +			btf_id = btf_id < 0 ? 0 : btf_id;
> +		}
> +
> +		/* kfunc not found in kernel BTF, try bpf_testmod BTF */
> +		if (!btf_id) {
> +			testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
> +			if (testmod_btf) {
> +				btf_id = btf__find_by_name_kind(testmod_btf,
> +								fixup_kfunc_btf_id->kfunc,
> +								BTF_KIND_FUNC);
> +				btf_id = btf_id < 0 ? 0 : btf_id;
> +				if (btf_id) {
> +					/* We put bpf_testmod module fd into fd_array
> +					 * and its index 1 into instruction 'off'.
> +					 */
> +					*fd_array = btf__fd(testmod_btf);
> +					prog[fixup_kfunc_btf_id->insn_idx].off = 1;
> +				}
> +			}
> +		}
> +
> +		prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
> +		fixup_kfunc_btf_id++;
> +	}
> +}
> +
>  static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
> -			  struct bpf_insn *prog, int *map_fds)
> +			  struct bpf_insn *prog, int *map_fds, int *fd_array)
>  {
>  	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
>  	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
> @@ -905,7 +1037,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
>  	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
>  	int *fixup_map_timer = test->fixup_map_timer;
>  	int *fixup_map_kptr = test->fixup_map_kptr;
> -	struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
>  
>  	if (test->fill_helper) {
>  		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
> @@ -1106,25 +1237,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
>  		} while (*fixup_map_kptr);
>  	}
>  
> -	/* Patch in kfunc BTF IDs */
> -	if (fixup_kfunc_btf_id->kfunc) {
> -		struct btf *btf;
> -		int btf_id;
> -
> -		do {
> -			btf_id = 0;
> -			btf = btf__load_vmlinux_btf();
> -			if (btf) {
> -				btf_id = btf__find_by_name_kind(btf,
> -								fixup_kfunc_btf_id->kfunc,
> -								BTF_KIND_FUNC);
> -				btf_id = btf_id < 0 ? 0 : btf_id;
> -			}
> -			btf__free(btf);
> -			prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
> -			fixup_kfunc_btf_id++;
> -		} while (fixup_kfunc_btf_id->kfunc);
> -	}
> +	fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
>  }
>  
>  struct libcap {
> @@ -1451,6 +1564,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
>  	int run_errs, run_successes;
>  	int map_fds[MAX_NR_MAPS];
>  	const char *expected_err;
> +	int fd_array[2] = { -1, -1 };
>  	int saved_errno;
>  	int fixup_skips;
>  	__u32 pflags;
> @@ -1464,7 +1578,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
>  	if (!prog_type)
>  		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
>  	fixup_skips = skips;
> -	do_test_fixup(test, prog_type, prog, map_fds);
> +	do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
>  	if (test->fill_insns) {
>  		prog = test->fill_insns;
>  		prog_len = test->prog_len;
> @@ -1498,6 +1612,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
>  	else
>  		opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
>  	opts.prog_flags = pflags;
> +	if (fd_array[1] != -1)
> +		opts.fd_array = &fd_array[0];
>  
>  	if ((prog_type == BPF_PROG_TYPE_TRACING ||
>  	     prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
> @@ -1740,6 +1856,7 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
>  	}
>  
>  	unload_bpf_testmod(verbose);
> +	kfuncs_cleanup();
>  
>  	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
>  	       skips, errors);
> -- 
> 2.39.1
>
Jiri Olsa Feb. 8, 2023, 10:09 a.m. UTC | #2
On Tue, Feb 07, 2023 at 09:34:26AM -0600, David Vernet wrote:
> On Fri, Feb 03, 2023 at 05:23:34PM +0100, Jiri Olsa wrote:
> > Currently the test_verifier allows test to specify kfunc symbol
> > and search for it in the kernel BTF.
> > 
> > Adding the possibility to search for kfunc also in bpf_testmod
> > module when it's not found in kernel BTF.
> > 
> > To find bpf_testmod btf we need to get back SYS_ADMIN cap.
> 
> This observation and any subsequent discussion is certainly outside the
> scope of your patch set, but it feels like a bit of a weird /
> inconsistent UX to force users to have SYS_ADMIN cap for loading kfuncs
> from modules, but not from vmlinux BTF.
> 
> I realize that you need to have SYS_ADMIN cap for BPF_PROG_GET_FD_BY_ID,
> BPF_MAP_GET_FD_BY_ID, etc, so the consistency makes sense there, but it
> would be nice if we could eventually make the UX consistent for programs
> linking against module kfuncs, because I don't really see the difference
> in terms of permissions from the user's perspective.

right, it's tricky.. I'm not sure if BPF_PROG_GET_FD_BY_ID could
work just with CAP_BPF.. will check

> 
> > 
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> 
> LGTM in general -- just left one comment below.
> 
> Acked-by: David Vernet <void@manifault.com>
> 
> > ---
> >  tools/testing/selftests/bpf/test_verifier.c | 161 +++++++++++++++++---
> >  1 file changed, 139 insertions(+), 22 deletions(-)
> > 
> > diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
> > index 14f11f2dfbce..0a570195be37 100644
> > --- a/tools/testing/selftests/bpf/test_verifier.c
> > +++ b/tools/testing/selftests/bpf/test_verifier.c
> > @@ -879,8 +879,140 @@ static int create_map_kptr(void)
> >  	return fd;
> >  }
> >  
> > +static void set_root(bool set)
> > +{
> > +	__u64 caps;
> > +
> > +	if (set) {
> > +		if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
> > +			perror("cap_disable_effective(CAP_SYS_ADMIN)");
> > +	} else {
> > +		if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
> > +			perror("cap_disable_effective(CAP_SYS_ADMIN)");
> > +	}
> > +}
> > +
> > +static inline __u64 ptr_to_u64(const void *ptr)
> > +{
> > +	return (__u64) (unsigned long) ptr;
> 
> Small nit / suggestion -- IMO this is slightly preferable just to keep
> it a bit more in-line with the C-standard:
> 
> return (uintptr_t)ptr;
> 
> The standard of course doesn't dictate that you can do
> ptr -> uintptr_t -> __u64 -> uintptr_t -> ptr, but it at least does dictate that you can do
> ptr -> uintptr_t -> ptr, whereas it does not say the same for
> ptr -> unsigned long -> ptr
> 
> Also, I don't think the 'inline' keyword is necessary. The compiler will
> probably figure this out on its own.

I copied & pasted ptr_to_u64 from some other test; sounds good, will check

> 
> > +}
> > +
> > +static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
> 
> Would be nice if some of this code could be shared from libbpf at some
> point, but ok, a cleanup for another time.

ok

thanks,
jirka

Patch

diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 14f11f2dfbce..0a570195be37 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -879,8 +879,140 @@ static int create_map_kptr(void)
 	return fd;
 }
 
+static void set_root(bool set)
+{
+	__u64 caps;
+
+	if (set) {
+		if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+			perror("cap_enable_effective(CAP_SYS_ADMIN)");
+	} else {
+		if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+			perror("cap_disable_effective(CAP_SYS_ADMIN)");
+	}
+}
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+	return (__u64) (unsigned long) ptr;
+}
+
+static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
+{
+	struct bpf_btf_info info;
+	__u32 len = sizeof(info);
+	struct btf *btf = NULL;
+	char name[64];
+	__u32 id = 0;
+	int err, fd;
+
+	/* Iterate all loaded BTF objects and find bpf_testmod,
+	 * we need SYS_ADMIN cap for that.
+	 */
+	set_root(true);
+
+	while (true) {
+		err = bpf_btf_get_next_id(id, &id);
+		if (err) {
+			if (errno == ENOENT)
+				break;
+			perror("bpf_btf_get_next_id failed");
+			break;
+		}
+
+		fd = bpf_btf_get_fd_by_id(id);
+		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
+			perror("bpf_btf_get_fd_by_id failed");
+			break;
+		}
+
+		memset(&info, 0, sizeof(info));
+		info.name_len = sizeof(name);
+		info.name = ptr_to_u64(name);
+		len = sizeof(info);
+
+		err = bpf_obj_get_info_by_fd(fd, &info, &len);
+		if (err) {
+			close(fd);
+			perror("bpf_obj_get_info_by_fd failed");
+			break;
+		}
+
+		if (strcmp("bpf_testmod", name)) {
+			close(fd);
+			continue;
+		}
+
+		btf = btf__load_from_kernel_by_id_split(id, vmlinux);
+		if (!btf) {
+			close(fd);
+			break;
+		}
+
+		/* We need the fd to stay open so it can be used in fd_array.
+		 * The final cleanup call to btf__free will free btf object
+		 * and close the file descriptor.
+		 */
+		btf__set_fd(btf, fd);
+		break;
+	}
+
+	set_root(false);
+	return btf;
+}
+
+static struct btf *testmod_btf;
+static struct btf *vmlinux_btf;
+
+static void kfuncs_cleanup(void)
+{
+	btf__free(testmod_btf);
+	btf__free(vmlinux_btf);
+}
+
+static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
+			      struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
+{
+	/* Patch in kfunc BTF IDs */
+	while (fixup_kfunc_btf_id->kfunc) {
+		int btf_id = 0;
+
+		/* try to find kfunc in kernel BTF */
+		vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
+		if (vmlinux_btf) {
+			btf_id = btf__find_by_name_kind(vmlinux_btf,
+							fixup_kfunc_btf_id->kfunc,
+							BTF_KIND_FUNC);
+			btf_id = btf_id < 0 ? 0 : btf_id;
+		}
+
+		/* kfunc not found in kernel BTF, try bpf_testmod BTF */
+		if (!btf_id) {
+			testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
+			if (testmod_btf) {
+				btf_id = btf__find_by_name_kind(testmod_btf,
+								fixup_kfunc_btf_id->kfunc,
+								BTF_KIND_FUNC);
+				btf_id = btf_id < 0 ? 0 : btf_id;
+				if (btf_id) {
+					/* We put bpf_testmod module fd into fd_array
+					 * and its index 1 into instruction 'off'.
+					 */
+					*fd_array = btf__fd(testmod_btf);
+					prog[fixup_kfunc_btf_id->insn_idx].off = 1;
+				}
+			}
+		}
+
+		prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
+		fixup_kfunc_btf_id++;
+	}
+}
+
 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
-			  struct bpf_insn *prog, int *map_fds)
+			  struct bpf_insn *prog, int *map_fds, int *fd_array)
 {
 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
@@ -905,7 +1037,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
 	int *fixup_map_timer = test->fixup_map_timer;
 	int *fixup_map_kptr = test->fixup_map_kptr;
-	struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
 
 	if (test->fill_helper) {
 		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -1106,25 +1237,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 		} while (*fixup_map_kptr);
 	}
 
-	/* Patch in kfunc BTF IDs */
-	if (fixup_kfunc_btf_id->kfunc) {
-		struct btf *btf;
-		int btf_id;
-
-		do {
-			btf_id = 0;
-			btf = btf__load_vmlinux_btf();
-			if (btf) {
-				btf_id = btf__find_by_name_kind(btf,
-								fixup_kfunc_btf_id->kfunc,
-								BTF_KIND_FUNC);
-				btf_id = btf_id < 0 ? 0 : btf_id;
-			}
-			btf__free(btf);
-			prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
-			fixup_kfunc_btf_id++;
-		} while (fixup_kfunc_btf_id->kfunc);
-	}
+	fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
 }
 
 struct libcap {
@@ -1451,6 +1564,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	int run_errs, run_successes;
 	int map_fds[MAX_NR_MAPS];
 	const char *expected_err;
+	int fd_array[2] = { -1, -1 };
 	int saved_errno;
 	int fixup_skips;
 	__u32 pflags;
@@ -1464,7 +1578,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	if (!prog_type)
 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 	fixup_skips = skips;
-	do_test_fixup(test, prog_type, prog, map_fds);
+	do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
 	if (test->fill_insns) {
 		prog = test->fill_insns;
 		prog_len = test->prog_len;
@@ -1498,6 +1612,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	else
 		opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
 	opts.prog_flags = pflags;
+	if (fd_array[1] != -1)
+		opts.fd_array = &fd_array[0];
 
 	if ((prog_type == BPF_PROG_TYPE_TRACING ||
 	     prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
@@ -1740,6 +1856,7 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
 	}
 
 	unload_bpf_testmod(verbose);
+	kfuncs_cleanup();
 
 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
 	       skips, errors);
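
For reference, the fd_array wiring added above boils down to the
following condensed sketch (the function name and includes are
illustrative; the field usage mirrors the patch):

#include <linux/bpf.h>
#include <bpf/btf.h>

static void wire_module_kfunc(struct bpf_insn *insn, struct btf *testmod_btf,
			      int *fd_array, int btf_id)
{
	/* fd_array[1] carries the bpf_testmod BTF fd; the call instruction's
	 * off field selects that index and imm carries the kfunc BTF ID.
	 */
	fd_array[1] = btf__fd(testmod_btf);
	insn->off = 1;
	insn->imm = btf_id;
}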